gcc/tree-ssa-math-opts.c
1 /* Global, SSA-based optimizations using mathematical identities.
2 Copyright (C) 2005-2016 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
     20 /* Currently, the first mini-pass in this file tries to CSE reciprocal
21 operations. These are common in sequences such as this one:
22
23 modulus = sqrt(x*x + y*y + z*z);
24 x = x / modulus;
25 y = y / modulus;
26 z = z / modulus;
27
28 that can be optimized to
29
30 modulus = sqrt(x*x + y*y + z*z);
31 rmodulus = 1.0 / modulus;
32 x = x * rmodulus;
33 y = y * rmodulus;
34 z = z * rmodulus;
35
36 We do this for loop invariant divisors, and with this pass whenever
37 we notice that a division has the same divisor multiple times.
38
     39    Of course, as in PRE, we don't insert a division if a dominator
40 already has one. However, this cannot be done as an extension of
41 PRE for several reasons.
42
     43    First, experiments showed that the transformation is not always
     44    useful if there are only two divisions by the same divisor.  This
     45    is probably because modern processors can pipeline the divisions;
     46    on older, in-order processors it should still be effective to
     47    optimize two divisions by the same number.
48 We make this a param, and it shall be called N in the remainder of
49 this comment.
50
     51    Second, if trapping math is active, we have less freedom on where
     52    to insert divisions: we can only do so in basic blocks that already
     53    contain one.  (If divisions don't trap, we can instead insert them
     54    elsewhere, in blocks that are common dominators of those that
     55    have the division.)
56
     57    We really don't want to compute the reciprocal unless a division will
     58    be found.  To do this, we won't insert the reciprocal in a basic block
     59    that has fewer than N divisions *post-dominating* it.
60
     61    The algorithm constructs a subset of the dominator tree, holding the
     62    blocks containing the divisions and their common dominators,
     63    and walks it twice.  The first walk is in post-order, and it annotates
64 each block with the number of divisions that post-dominate it: this
65 gives information on where divisions can be inserted profitably.
66 The second walk is in pre-order, and it inserts divisions as explained
67 above, and replaces divisions by multiplications.
68
     69    In the best case, the cost of the pass is O(n_statements).  In the
     70    worst case, the cost is dominated by creating the dominator tree subset,
     71    at O(n_basic_blocks ^ 2); however, this can only happen
72 for n_statements / n_basic_blocks statements. So, the amortized cost
73 of creating the dominator tree subset is O(n_basic_blocks) and the
74 worst-case cost of the pass is O(n_statements * n_basic_blocks).
75
76 More practically, the cost will be small because there are few
77 divisions, and they tend to be in the same basic block, so insert_bb
78 is called very few times.
79
80 If we did this using domwalk.c, an efficient implementation would have
81 to work on all the variables in a single pass, because we could not
82 work on just a subset of the dominator tree, as we do now, and the
83 cost would also be something like O(n_statements * n_basic_blocks).
84 The data structures would be more complex in order to work on all the
85 variables in a single pass. */
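
/* As a small illustration (hypothetical input, not taken from any
   particular testcase): with -freciprocal-math and non-trapping math,
   two divisions on different branches can share one reciprocal inserted
   in their common dominator, assuming the target's threshold N is met:

     if (cond) a = x / d; else b = y / d;

   becomes

     t = 1.0 / d;
     if (cond) a = x * t; else b = y * t;  */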
86
87 #include "config.h"
88 #include "system.h"
89 #include "coretypes.h"
90 #include "backend.h"
91 #include "target.h"
92 #include "rtl.h"
93 #include "tree.h"
94 #include "gimple.h"
95 #include "predict.h"
96 #include "alloc-pool.h"
97 #include "tree-pass.h"
98 #include "ssa.h"
99 #include "optabs-tree.h"
100 #include "gimple-pretty-print.h"
101 #include "alias.h"
102 #include "fold-const.h"
103 #include "gimple-fold.h"
104 #include "gimple-iterator.h"
105 #include "gimplify.h"
106 #include "gimplify-me.h"
107 #include "stor-layout.h"
108 #include "tree-cfg.h"
109 #include "tree-dfa.h"
110 #include "tree-ssa.h"
111 #include "builtins.h"
112 #include "params.h"
113 #include "internal-fn.h"
114 #include "case-cfn-macros.h"
115
116 /* This structure represents one basic block that either computes a
    117    division, or is a common dominator for basic blocks that compute a
118 division. */
119 struct occurrence {
120 /* The basic block represented by this structure. */
121 basic_block bb;
122
123 /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
124 inserted in BB. */
125 tree recip_def;
126
127 /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
128 was inserted in BB. */
129 gimple *recip_def_stmt;
130
131 /* Pointer to a list of "struct occurrence"s for blocks dominated
132 by BB. */
133 struct occurrence *children;
134
    135   /* Pointer to the next "struct occurrence" in the list of blocks
136 sharing a common dominator. */
137 struct occurrence *next;
138
139 /* The number of divisions that are in BB before compute_merit. The
140 number of divisions that are in BB or post-dominate it after
141 compute_merit. */
142 int num_divisions;
143
144 /* True if the basic block has a division, false if it is a common
145 dominator for basic blocks that do. If it is false and trapping
146 math is active, BB is not a candidate for inserting a reciprocal. */
147 bool bb_has_division;
148 };
149
150 static struct
151 {
152 /* Number of 1.0/X ops inserted. */
153 int rdivs_inserted;
154
155 /* Number of 1.0/FUNC ops inserted. */
156 int rfuncs_inserted;
157 } reciprocal_stats;
158
159 static struct
160 {
161 /* Number of cexpi calls inserted. */
162 int inserted;
163 } sincos_stats;
164
165 static struct
166 {
167 /* Number of hand-written 16-bit nop / bswaps found. */
168 int found_16bit;
169
170 /* Number of hand-written 32-bit nop / bswaps found. */
171 int found_32bit;
172
173 /* Number of hand-written 64-bit nop / bswaps found. */
174 int found_64bit;
175 } nop_stats, bswap_stats;
176
177 static struct
178 {
179 /* Number of widening multiplication ops inserted. */
180 int widen_mults_inserted;
181
182 /* Number of integer multiply-and-accumulate ops inserted. */
183 int maccs_inserted;
184
185 /* Number of fp fused multiply-add ops inserted. */
186 int fmas_inserted;
187 } widen_mul_stats;
188
189 /* The instance of "struct occurrence" representing the highest
190 interesting block in the dominator tree. */
191 static struct occurrence *occ_head;
192
193 /* Allocation pool for getting instances of "struct occurrence". */
194 static object_allocator<occurrence> *occ_pool;
195
196
197
    198 /* Allocate and return a new struct occurrence for basic block BB,
199 whose children list is headed by CHILDREN. */
200 static struct occurrence *
201 occ_new (basic_block bb, struct occurrence *children)
202 {
203 struct occurrence *occ;
204
205 bb->aux = occ = occ_pool->allocate ();
206 memset (occ, 0, sizeof (struct occurrence));
207
208 occ->bb = bb;
209 occ->children = children;
210 return occ;
211 }
212
213
214 /* Insert NEW_OCC into our subset of the dominator tree. P_HEAD points to a
215 list of "struct occurrence"s, one per basic block, having IDOM as
216 their common dominator.
217
218 We try to insert NEW_OCC as deep as possible in the tree, and we also
219 insert any other block that is a common dominator for BB and one
220 block already in the tree. */
221
222 static void
223 insert_bb (struct occurrence *new_occ, basic_block idom,
224 struct occurrence **p_head)
225 {
226 struct occurrence *occ, **p_occ;
227
228 for (p_occ = p_head; (occ = *p_occ) != NULL; )
229 {
230 basic_block bb = new_occ->bb, occ_bb = occ->bb;
231 basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
232 if (dom == bb)
233 {
234 /* BB dominates OCC_BB. OCC becomes NEW_OCC's child: remove OCC
235 from its list. */
236 *p_occ = occ->next;
237 occ->next = new_occ->children;
238 new_occ->children = occ;
239
    240 	  /* Try the next block (it may also be dominated by BB).  */
241 }
242
243 else if (dom == occ_bb)
244 {
245 /* OCC_BB dominates BB. Tail recurse to look deeper. */
246 insert_bb (new_occ, dom, &occ->children);
247 return;
248 }
249
250 else if (dom != idom)
251 {
252 gcc_assert (!dom->aux);
253
254 /* There is a dominator between IDOM and BB, add it and make
255 two children out of NEW_OCC and OCC. First, remove OCC from
256 its list. */
257 *p_occ = occ->next;
258 new_occ->next = occ;
259 occ->next = NULL;
260
261 /* None of the previous blocks has DOM as a dominator: if we tail
262 recursed, we would reexamine them uselessly. Just switch BB with
263 DOM, and go on looking for blocks dominated by DOM. */
264 new_occ = occ_new (dom, new_occ);
265 }
266
267 else
268 {
269 /* Nothing special, go on with the next element. */
270 p_occ = &occ->next;
271 }
272 }
273
    274   /* No place was found deeper in the tree: make NEW_OCC a child of IDOM.  */
275 new_occ->next = *p_head;
276 *p_head = new_occ;
277 }
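
/* For example, registering divisions in blocks B2 and B3 whose nearest
   common dominator is B1 (block names hypothetical) yields this subset
   of the dominator tree:

     B1 (bb_has_division = false; added by insert_bb as the dominator)
     +-- B3 (bb_has_division = true)
     +-- B2 (bb_has_division = true)

   OCC_HEAD then points at B1's occurrence, whose children list holds
   B3 and B2.  */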
278
279 /* Register that we found a division in BB. */
280
281 static inline void
282 register_division_in (basic_block bb)
283 {
284 struct occurrence *occ;
285
286 occ = (struct occurrence *) bb->aux;
287 if (!occ)
288 {
289 occ = occ_new (bb, NULL);
290 insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
291 }
292
293 occ->bb_has_division = true;
294 occ->num_divisions++;
295 }
296
297
298 /* Compute the number of divisions that postdominate each block in OCC and
299 its children. */
300
301 static void
302 compute_merit (struct occurrence *occ)
303 {
304 struct occurrence *occ_child;
305 basic_block dom = occ->bb;
306
307 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
308 {
309 basic_block bb;
310 if (occ_child->children)
311 compute_merit (occ_child);
312
313 if (flag_exceptions)
314 bb = single_noncomplex_succ (dom);
315 else
316 bb = dom;
317
318 if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
319 occ->num_divisions += occ_child->num_divisions;
320 }
321 }
322
323
324 /* Return whether USE_STMT is a floating-point division by DEF. */
325 static inline bool
326 is_division_by (gimple *use_stmt, tree def)
327 {
328 return is_gimple_assign (use_stmt)
329 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
330 && gimple_assign_rhs2 (use_stmt) == def
    331 	 /* Do not recognize x / x as a valid division, as we would get
    332 	    confused later by replacing all immediate uses of x in such
    333 	    a stmt.  */
334 && gimple_assign_rhs1 (use_stmt) != def;
335 }
336
337 /* Walk the subset of the dominator tree rooted at OCC, setting the
338 RECIP_DEF field to a definition of 1.0 / DEF that can be used in
339 the given basic block. The field may be left NULL, of course,
340 if it is not possible or profitable to do the optimization.
341
    342    DEF_GSI is an iterator pointing at the statement defining DEF.
343 If RECIP_DEF is set, a dominator already has a computation that can
344 be used. */
345
346 static void
347 insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
348 tree def, tree recip_def, int threshold)
349 {
350 tree type;
351 gassign *new_stmt;
352 gimple_stmt_iterator gsi;
353 struct occurrence *occ_child;
354
355 if (!recip_def
356 && (occ->bb_has_division || !flag_trapping_math)
357 && occ->num_divisions >= threshold)
358 {
359 /* Make a variable with the replacement and substitute it. */
360 type = TREE_TYPE (def);
361 recip_def = create_tmp_reg (type, "reciptmp");
362 new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
363 build_one_cst (type), def);
364
365 if (occ->bb_has_division)
366 {
367 /* Case 1: insert before an existing division. */
368 gsi = gsi_after_labels (occ->bb);
369 while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
370 gsi_next (&gsi);
371
372 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
373 }
374 else if (def_gsi && occ->bb == def_gsi->bb)
375 {
376 /* Case 2: insert right after the definition. Note that this will
377 never happen if the definition statement can throw, because in
378 that case the sole successor of the statement's basic block will
379 dominate all the uses as well. */
380 gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
381 }
382 else
383 {
384 /* Case 3: insert in a basic block not containing defs/uses. */
385 gsi = gsi_after_labels (occ->bb);
386 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
387 }
388
389 reciprocal_stats.rdivs_inserted++;
390
391 occ->recip_def_stmt = new_stmt;
392 }
393
394 occ->recip_def = recip_def;
395 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
396 insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
397 }
398
399
400 /* Replace the division at USE_P with a multiplication by the reciprocal, if
401 possible. */
402
403 static inline void
404 replace_reciprocal (use_operand_p use_p)
405 {
406 gimple *use_stmt = USE_STMT (use_p);
407 basic_block bb = gimple_bb (use_stmt);
408 struct occurrence *occ = (struct occurrence *) bb->aux;
409
410 if (optimize_bb_for_speed_p (bb)
411 && occ->recip_def && use_stmt != occ->recip_def_stmt)
412 {
413 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
414 gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
415 SET_USE (use_p, occ->recip_def);
416 fold_stmt_inplace (&gsi);
417 update_stmt (use_stmt);
418 }
419 }
420
421
422 /* Free OCC and return one more "struct occurrence" to be freed. */
423
424 static struct occurrence *
425 free_bb (struct occurrence *occ)
426 {
427 struct occurrence *child, *next;
428
429 /* First get the two pointers hanging off OCC. */
430 next = occ->next;
431 child = occ->children;
432 occ->bb->aux = NULL;
433 occ_pool->remove (occ);
434
435 /* Now ensure that we don't recurse unless it is necessary. */
436 if (!child)
437 return next;
438 else
439 {
440 while (next)
441 next = free_bb (next);
442
443 return child;
444 }
445 }
446
447
448 /* Look for floating-point divisions among DEF's uses, and try to
449 replace them by multiplications with the reciprocal. Add
450 as many statements computing the reciprocal as needed.
451
452 DEF must be a GIMPLE register of a floating-point type. */
453
454 static void
455 execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
456 {
457 use_operand_p use_p;
458 imm_use_iterator use_iter;
459 struct occurrence *occ;
460 int count = 0, threshold;
461
462 gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));
463
464 FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
465 {
466 gimple *use_stmt = USE_STMT (use_p);
467 if (is_division_by (use_stmt, def))
468 {
469 register_division_in (gimple_bb (use_stmt));
470 count++;
471 }
472 }
473
474 /* Do the expensive part only if we can hope to optimize something. */
475 threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
476 if (count >= threshold)
477 {
478 gimple *use_stmt;
479 for (occ = occ_head; occ; occ = occ->next)
480 {
481 compute_merit (occ);
482 insert_reciprocals (def_gsi, occ, def, NULL, threshold);
483 }
484
485 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
486 {
487 if (is_division_by (use_stmt, def))
488 {
489 FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
490 replace_reciprocal (use_p);
491 }
492 }
493 }
494
495 for (occ = occ_head; occ; )
496 occ = free_bb (occ);
497
498 occ_head = NULL;
499 }
500
501 /* Return an internal function that implements the reciprocal of CALL,
502 or IFN_LAST if there is no such function that the target supports. */
503
504 internal_fn
505 internal_fn_reciprocal (gcall *call)
506 {
507 internal_fn ifn;
508
509 switch (gimple_call_combined_fn (call))
510 {
511 CASE_CFN_SQRT:
512 ifn = IFN_RSQRT;
513 break;
514
515 default:
516 return IFN_LAST;
517 }
518
519 tree_pair types = direct_internal_fn_types (ifn, call);
520 if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
521 return IFN_LAST;
522
523 return ifn;
524 }
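
/* For instance (a sketch, with hypothetical source), on a target that
   supports IFN_RSQRT for the mode, the scan in the execute function
   below rewrites

     y = a / sqrtf (b);

   into the GIMPLE equivalent of

     t_1 = .RSQRT (b);
     y = a * t_1;

   so the division disappears entirely.  */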
525
526 /* Go through all the floating-point SSA_NAMEs, and call
527 execute_cse_reciprocals_1 on each of them. */
528 namespace {
529
530 const pass_data pass_data_cse_reciprocals =
531 {
532 GIMPLE_PASS, /* type */
533 "recip", /* name */
534 OPTGROUP_NONE, /* optinfo_flags */
535 TV_NONE, /* tv_id */
536 PROP_ssa, /* properties_required */
537 0, /* properties_provided */
538 0, /* properties_destroyed */
539 0, /* todo_flags_start */
540 TODO_update_ssa, /* todo_flags_finish */
541 };
542
543 class pass_cse_reciprocals : public gimple_opt_pass
544 {
545 public:
546 pass_cse_reciprocals (gcc::context *ctxt)
547 : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
548 {}
549
550 /* opt_pass methods: */
551 virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
552 virtual unsigned int execute (function *);
553
554 }; // class pass_cse_reciprocals
555
556 unsigned int
557 pass_cse_reciprocals::execute (function *fun)
558 {
559 basic_block bb;
560 tree arg;
561
562 occ_pool = new object_allocator<occurrence> ("dominators for recip");
563
564 memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
565 calculate_dominance_info (CDI_DOMINATORS);
566 calculate_dominance_info (CDI_POST_DOMINATORS);
567
568 if (flag_checking)
569 FOR_EACH_BB_FN (bb, fun)
570 gcc_assert (!bb->aux);
571
572 for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
573 if (FLOAT_TYPE_P (TREE_TYPE (arg))
574 && is_gimple_reg (arg))
575 {
576 tree name = ssa_default_def (fun, arg);
577 if (name)
578 execute_cse_reciprocals_1 (NULL, name);
579 }
580
581 FOR_EACH_BB_FN (bb, fun)
582 {
583 tree def;
584
585 for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
586 gsi_next (&gsi))
587 {
588 gphi *phi = gsi.phi ();
589 def = PHI_RESULT (phi);
590 if (! virtual_operand_p (def)
591 && FLOAT_TYPE_P (TREE_TYPE (def)))
592 execute_cse_reciprocals_1 (NULL, def);
593 }
594
595 for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
596 gsi_next (&gsi))
597 {
598 gimple *stmt = gsi_stmt (gsi);
599
600 if (gimple_has_lhs (stmt)
601 && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
602 && FLOAT_TYPE_P (TREE_TYPE (def))
603 && TREE_CODE (def) == SSA_NAME)
604 execute_cse_reciprocals_1 (&gsi, def);
605 }
606
607 if (optimize_bb_for_size_p (bb))
608 continue;
609
610 /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b). */
611 for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
612 gsi_next (&gsi))
613 {
614 gimple *stmt = gsi_stmt (gsi);
615
616 if (is_gimple_assign (stmt)
617 && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
618 {
619 tree arg1 = gimple_assign_rhs2 (stmt);
620 gimple *stmt1;
621
622 if (TREE_CODE (arg1) != SSA_NAME)
623 continue;
624
625 stmt1 = SSA_NAME_DEF_STMT (arg1);
626
627 if (is_gimple_call (stmt1)
628 && gimple_call_lhs (stmt1))
629 {
630 bool fail;
631 imm_use_iterator ui;
632 use_operand_p use_p;
633 tree fndecl = NULL_TREE;
634
635 gcall *call = as_a <gcall *> (stmt1);
636 internal_fn ifn = internal_fn_reciprocal (call);
637 if (ifn == IFN_LAST)
638 {
639 fndecl = gimple_call_fndecl (call);
640 if (!fndecl
641 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD)
642 continue;
643 fndecl = targetm.builtin_reciprocal (fndecl);
644 if (!fndecl)
645 continue;
646 }
647
648 /* Check that all uses of the SSA name are divisions,
649 otherwise replacing the defining statement will do
650 the wrong thing. */
651 fail = false;
652 FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
653 {
654 gimple *stmt2 = USE_STMT (use_p);
655 if (is_gimple_debug (stmt2))
656 continue;
657 if (!is_gimple_assign (stmt2)
658 || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
659 || gimple_assign_rhs1 (stmt2) == arg1
660 || gimple_assign_rhs2 (stmt2) != arg1)
661 {
662 fail = true;
663 break;
664 }
665 }
666 if (fail)
667 continue;
668
669 gimple_replace_ssa_lhs (call, arg1);
670 if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
671 {
672 auto_vec<tree, 4> args;
673 for (unsigned int i = 0;
674 i < gimple_call_num_args (call); i++)
675 args.safe_push (gimple_call_arg (call, i));
676 gcall *stmt2;
677 if (ifn == IFN_LAST)
678 stmt2 = gimple_build_call_vec (fndecl, args);
679 else
680 stmt2 = gimple_build_call_internal_vec (ifn, args);
681 gimple_call_set_lhs (stmt2, arg1);
682 if (gimple_vdef (call))
683 {
684 gimple_set_vdef (stmt2, gimple_vdef (call));
685 SSA_NAME_DEF_STMT (gimple_vdef (stmt2)) = stmt2;
686 }
687 gimple_set_vuse (stmt2, gimple_vuse (call));
688 gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
689 gsi_replace (&gsi2, stmt2, true);
690 }
691 else
692 {
693 if (ifn == IFN_LAST)
694 gimple_call_set_fndecl (call, fndecl);
695 else
696 gimple_call_set_internal_fn (call, ifn);
697 update_stmt (call);
698 }
699 reciprocal_stats.rfuncs_inserted++;
700
701 FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
702 {
703 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
704 gimple_assign_set_rhs_code (stmt, MULT_EXPR);
705 fold_stmt_inplace (&gsi);
706 update_stmt (stmt);
707 }
708 }
709 }
710 }
711 }
712
713 statistics_counter_event (fun, "reciprocal divs inserted",
714 reciprocal_stats.rdivs_inserted);
715 statistics_counter_event (fun, "reciprocal functions inserted",
716 reciprocal_stats.rfuncs_inserted);
717
718 free_dominance_info (CDI_DOMINATORS);
719 free_dominance_info (CDI_POST_DOMINATORS);
720 delete occ_pool;
721 return 0;
722 }
723
724 } // anon namespace
725
726 gimple_opt_pass *
727 make_pass_cse_reciprocals (gcc::context *ctxt)
728 {
729 return new pass_cse_reciprocals (ctxt);
730 }
731
    732 /* Records an occurrence at statement USE_STMT in the vector of
    733    statements STMTS if it is dominated by *TOP_BB, dominates it, or
    734    *TOP_BB is not yet initialized.  Returns true if the occurrence was
    735    pushed on the vector.  Adjusts *TOP_BB to be the basic block
    736    dominating all statements in the vector.  */
737
738 static bool
739 maybe_record_sincos (vec<gimple *> *stmts,
740 basic_block *top_bb, gimple *use_stmt)
741 {
742 basic_block use_bb = gimple_bb (use_stmt);
743 if (*top_bb
744 && (*top_bb == use_bb
745 || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
746 stmts->safe_push (use_stmt);
747 else if (!*top_bb
748 || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
749 {
750 stmts->safe_push (use_stmt);
751 *top_bb = use_bb;
752 }
753 else
754 return false;
755
756 return true;
757 }
758
759 /* Look for sin, cos and cexpi calls with the same argument NAME and
    760    create a single call to cexpi, CSEing the result.
761 We first walk over all immediate uses of the argument collecting
762 statements that we can CSE in a vector and in a second pass replace
763 the statement rhs with a REALPART or IMAGPART expression on the
764 result of the cexpi call we insert before the use statement that
765 dominates all other candidates. */
766
767 static bool
768 execute_cse_sincos_1 (tree name)
769 {
770 gimple_stmt_iterator gsi;
771 imm_use_iterator use_iter;
772 tree fndecl, res, type;
773 gimple *def_stmt, *use_stmt, *stmt;
774 int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
775 auto_vec<gimple *> stmts;
776 basic_block top_bb = NULL;
777 int i;
778 bool cfg_changed = false;
779
780 type = TREE_TYPE (name);
781 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
782 {
783 if (gimple_code (use_stmt) != GIMPLE_CALL
784 || !gimple_call_lhs (use_stmt))
785 continue;
786
787 switch (gimple_call_combined_fn (use_stmt))
788 {
789 CASE_CFN_COS:
790 seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
791 break;
792
793 CASE_CFN_SIN:
794 seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
795 break;
796
797 CASE_CFN_CEXPI:
798 seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
799 break;
800
801 default:;
802 }
803 }
804
805 if (seen_cos + seen_sin + seen_cexpi <= 1)
806 return false;
807
808 /* Simply insert cexpi at the beginning of top_bb but not earlier than
809 the name def statement. */
810 fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
811 if (!fndecl)
812 return false;
813 stmt = gimple_build_call (fndecl, 1, name);
814 res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
815 gimple_call_set_lhs (stmt, res);
816
817 def_stmt = SSA_NAME_DEF_STMT (name);
818 if (!SSA_NAME_IS_DEFAULT_DEF (name)
819 && gimple_code (def_stmt) != GIMPLE_PHI
820 && gimple_bb (def_stmt) == top_bb)
821 {
822 gsi = gsi_for_stmt (def_stmt);
823 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
824 }
825 else
826 {
827 gsi = gsi_after_labels (top_bb);
828 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
829 }
830 sincos_stats.inserted++;
831
832 /* And adjust the recorded old call sites. */
833 for (i = 0; stmts.iterate (i, &use_stmt); ++i)
834 {
835 tree rhs = NULL;
836
837 switch (gimple_call_combined_fn (use_stmt))
838 {
839 CASE_CFN_COS:
840 rhs = fold_build1 (REALPART_EXPR, type, res);
841 break;
842
843 CASE_CFN_SIN:
844 rhs = fold_build1 (IMAGPART_EXPR, type, res);
845 break;
846
847 CASE_CFN_CEXPI:
848 rhs = res;
849 break;
850
851 default:;
852 gcc_unreachable ();
853 }
854
855 /* Replace call with a copy. */
856 stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);
857
858 gsi = gsi_for_stmt (use_stmt);
859 gsi_replace (&gsi, stmt, true);
860 if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
861 cfg_changed = true;
862 }
863
864 return cfg_changed;
865 }
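
/* As an illustration (hypothetical source), for

     s = sinf (x);
     c = cosf (x);

   this function inserts one dominating cexpi call and rewrites the old
   call sites into component extractions:

     tmp = __builtin_cexpif (x);
     s = __imag__ tmp;
     c = __real__ tmp;  */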
866
867 /* To evaluate powi(x,n), the floating point value x raised to the
868 constant integer exponent n, we use a hybrid algorithm that
869 combines the "window method" with look-up tables. For an
870 introduction to exponentiation algorithms and "addition chains",
871 see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
872 "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
873 3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
874 Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998. */
875
876 /* Provide a default value for POWI_MAX_MULTS, the maximum number of
877 multiplications to inline before calling the system library's pow
878 function. powi(x,n) requires at worst 2*bits(n)-2 multiplications,
879 so this default never requires calling pow, powf or powl. */
880
881 #ifndef POWI_MAX_MULTS
882 #define POWI_MAX_MULTS (2*HOST_BITS_PER_WIDE_INT-2)
883 #endif
884
885 /* The size of the "optimal power tree" lookup table. All
886 exponents less than this value are simply looked up in the
887 powi_table below. This threshold is also used to size the
888 cache of pseudo registers that hold intermediate results. */
889 #define POWI_TABLE_SIZE 256
890
    891 /* The size, in bits, of the window used in the "window method"
892 exponentiation algorithm. This is equivalent to a radix of
893 (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method". */
894 #define POWI_WINDOW_SIZE 3
895
896 /* The following table is an efficient representation of an
897 "optimal power tree". For each value, i, the corresponding
    898    value, j, in the table states that an optimal evaluation
899 sequence for calculating pow(x,i) can be found by evaluating
900 pow(x,j)*pow(x,i-j). An optimal power tree for the first
    901    100 integers is given in Knuth's "Seminumerical Algorithms".  */
902
903 static const unsigned char powi_table[POWI_TABLE_SIZE] =
904 {
905 0, 1, 1, 2, 2, 3, 3, 4, /* 0 - 7 */
906 4, 6, 5, 6, 6, 10, 7, 9, /* 8 - 15 */
907 8, 16, 9, 16, 10, 12, 11, 13, /* 16 - 23 */
908 12, 17, 13, 18, 14, 24, 15, 26, /* 24 - 31 */
909 16, 17, 17, 19, 18, 33, 19, 26, /* 32 - 39 */
910 20, 25, 21, 40, 22, 27, 23, 44, /* 40 - 47 */
911 24, 32, 25, 34, 26, 29, 27, 44, /* 48 - 55 */
912 28, 31, 29, 34, 30, 60, 31, 36, /* 56 - 63 */
913 32, 64, 33, 34, 34, 46, 35, 37, /* 64 - 71 */
914 36, 65, 37, 50, 38, 48, 39, 69, /* 72 - 79 */
915 40, 49, 41, 43, 42, 51, 43, 58, /* 80 - 87 */
916 44, 64, 45, 47, 46, 59, 47, 76, /* 88 - 95 */
917 48, 65, 49, 66, 50, 67, 51, 66, /* 96 - 103 */
918 52, 70, 53, 74, 54, 104, 55, 74, /* 104 - 111 */
919 56, 64, 57, 69, 58, 78, 59, 68, /* 112 - 119 */
920 60, 61, 61, 80, 62, 75, 63, 68, /* 120 - 127 */
921 64, 65, 65, 128, 66, 129, 67, 90, /* 128 - 135 */
922 68, 73, 69, 131, 70, 94, 71, 88, /* 136 - 143 */
923 72, 128, 73, 98, 74, 132, 75, 121, /* 144 - 151 */
924 76, 102, 77, 124, 78, 132, 79, 106, /* 152 - 159 */
925 80, 97, 81, 160, 82, 99, 83, 134, /* 160 - 167 */
926 84, 86, 85, 95, 86, 160, 87, 100, /* 168 - 175 */
927 88, 113, 89, 98, 90, 107, 91, 122, /* 176 - 183 */
928 92, 111, 93, 102, 94, 126, 95, 150, /* 184 - 191 */
929 96, 128, 97, 130, 98, 133, 99, 195, /* 192 - 199 */
930 100, 128, 101, 123, 102, 164, 103, 138, /* 200 - 207 */
931 104, 145, 105, 146, 106, 109, 107, 149, /* 208 - 215 */
932 108, 200, 109, 146, 110, 170, 111, 157, /* 216 - 223 */
933 112, 128, 113, 130, 114, 182, 115, 132, /* 224 - 231 */
934 116, 200, 117, 132, 118, 158, 119, 206, /* 232 - 239 */
935 120, 240, 121, 162, 122, 147, 123, 152, /* 240 - 247 */
936 124, 166, 125, 214, 126, 138, 127, 153, /* 248 - 255 */
937 };
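
/* A minimal stand-alone sketch of the window method implemented by
   powi_as_mults_1 below (it omits the powi_table lookups for exponents
   below POWI_TABLE_SIZE and the cache that shares intermediate results,
   so it may recompute subexpressions):

     static double
     powi_sketch (double x, unsigned long n)
     {
       if (n == 0)
         return 1.0;
       if (n == 1)
         return x;
       if (n & 1)
         {
           // Peel off a window of the low-order bits.
           unsigned long digit = n & ((1UL << POWI_WINDOW_SIZE) - 1);
           if (digit == n)  // small odd exponent: strip one factor of x
             return x * powi_sketch (x, n - 1);
           return powi_sketch (x, n - digit) * powi_sketch (x, digit);
         }
       double h = powi_sketch (x, n >> 1);  // square for even exponents
       return h * h;
     }
*/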
938
939
940 /* Return the number of multiplications required to calculate
941 powi(x,n) where n is less than POWI_TABLE_SIZE. This is a
942 subroutine of powi_cost. CACHE is an array indicating
943 which exponents have already been calculated. */
944
945 static int
946 powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
947 {
948 /* If we've already calculated this exponent, then this evaluation
949 doesn't require any additional multiplications. */
950 if (cache[n])
951 return 0;
952
953 cache[n] = true;
954 return powi_lookup_cost (n - powi_table[n], cache)
955 + powi_lookup_cost (powi_table[n], cache) + 1;
956 }
957
958 /* Return the number of multiplications required to calculate
959 powi(x,n) for an arbitrary x, given the exponent N. This
960 function needs to be kept in sync with powi_as_mults below. */
961
962 static int
963 powi_cost (HOST_WIDE_INT n)
964 {
965 bool cache[POWI_TABLE_SIZE];
966 unsigned HOST_WIDE_INT digit;
967 unsigned HOST_WIDE_INT val;
968 int result;
969
970 if (n == 0)
971 return 0;
972
973 /* Ignore the reciprocal when calculating the cost. */
974 val = (n < 0) ? -n : n;
975
976 /* Initialize the exponent cache. */
977 memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
978 cache[1] = true;
979
980 result = 0;
981
982 while (val >= POWI_TABLE_SIZE)
983 {
984 if (val & 1)
985 {
986 digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
987 result += powi_lookup_cost (digit, cache)
988 + POWI_WINDOW_SIZE + 1;
989 val >>= POWI_WINDOW_SIZE;
990 }
991 else
992 {
993 val >>= 1;
994 result++;
995 }
996 }
997
998 return result + powi_lookup_cost (val, cache);
999 }
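
/* Worked example: powi_cost (5) is 3.  powi_table[5] is 3, so x**5 is
   evaluated as x**2 * x**3, costing one multiplication each for x**2,
   for x**3 (= x**2 * x) and for the final product.  */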
1000
1001 /* Recursive subroutine of powi_as_mults. This function takes the
1002 array, CACHE, of already calculated exponents and an exponent N and
1003 returns a tree that corresponds to CACHE[1]**N, with type TYPE. */
1004
1005 static tree
1006 powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
1007 HOST_WIDE_INT n, tree *cache)
1008 {
1009 tree op0, op1, ssa_target;
1010 unsigned HOST_WIDE_INT digit;
1011 gassign *mult_stmt;
1012
1013 if (n < POWI_TABLE_SIZE && cache[n])
1014 return cache[n];
1015
1016 ssa_target = make_temp_ssa_name (type, NULL, "powmult");
1017
1018 if (n < POWI_TABLE_SIZE)
1019 {
1020 cache[n] = ssa_target;
1021 op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
1022 op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
1023 }
1024 else if (n & 1)
1025 {
1026 digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
1027 op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
1028 op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
1029 }
1030 else
1031 {
1032 op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
1033 op1 = op0;
1034 }
1035
1036 mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
1037 gimple_set_location (mult_stmt, loc);
1038 gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);
1039
1040 return ssa_target;
1041 }
1042
1043 /* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
1044 This function needs to be kept in sync with powi_cost above. */
1045
1046 static tree
1047 powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
1048 tree arg0, HOST_WIDE_INT n)
1049 {
1050 tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
1051 gassign *div_stmt;
1052 tree target;
1053
1054 if (n == 0)
1055 return build_real (type, dconst1);
1056
1057 memset (cache, 0, sizeof (cache));
1058 cache[1] = arg0;
1059
1060 result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
1061 if (n >= 0)
1062 return result;
1063
1064 /* If the original exponent was negative, reciprocate the result. */
1065 target = make_temp_ssa_name (type, NULL, "powmult");
1066 div_stmt = gimple_build_assign (target, RDIV_EXPR,
1067 build_real (type, dconst1), result);
1068 gimple_set_location (div_stmt, loc);
1069 gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);
1070
1071 return target;
1072 }
1073
1074 /* ARG0 and N are the two arguments to a powi builtin in GSI with
1075 location info LOC. If the arguments are appropriate, create an
1076 equivalent sequence of statements prior to GSI using an optimal
   1077    number of multiplications, and return an expression holding the
1078 result. */
1079
1080 static tree
1081 gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
1082 tree arg0, HOST_WIDE_INT n)
1083 {
1084 /* Avoid largest negative number. */
1085 if (n != -n
1086 && ((n >= -1 && n <= 2)
1087 || (optimize_function_for_speed_p (cfun)
1088 && powi_cost (n) <= POWI_MAX_MULTS)))
1089 return powi_as_mults (gsi, loc, arg0, n);
1090
1091 return NULL_TREE;
1092 }
1093
1094 /* Build a gimple call statement that calls FN with argument ARG.
1095 Set the lhs of the call statement to a fresh SSA name. Insert the
1096 statement prior to GSI's current position, and return the fresh
1097 SSA name. */
1098
1099 static tree
1100 build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
1101 tree fn, tree arg)
1102 {
1103 gcall *call_stmt;
1104 tree ssa_target;
1105
1106 call_stmt = gimple_build_call (fn, 1, arg);
1107 ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
1108 gimple_set_lhs (call_stmt, ssa_target);
1109 gimple_set_location (call_stmt, loc);
1110 gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);
1111
1112 return ssa_target;
1113 }
1114
1115 /* Build a gimple binary operation with the given CODE and arguments
   1116    ARG0, ARG1, assigning the result to a new SSA name with base NAME.
   1117    Insert the statement prior to GSI's current position, and
   1118    return the fresh SSA name.  */
1119
1120 static tree
1121 build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
1122 const char *name, enum tree_code code,
1123 tree arg0, tree arg1)
1124 {
1125 tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
1126 gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
1127 gimple_set_location (stmt, loc);
1128 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1129 return result;
1130 }
1131
1132 /* Build a gimple reference operation with the given CODE and argument
   1133    ARG0, assigning the result to a new SSA name of TYPE with NAME.
1134 Insert the statement prior to GSI's current position, and return
1135 the fresh SSA name. */
1136
1137 static inline tree
1138 build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
1139 const char *name, enum tree_code code, tree arg0)
1140 {
1141 tree result = make_temp_ssa_name (type, NULL, name);
1142 gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
1143 gimple_set_location (stmt, loc);
1144 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1145 return result;
1146 }
1147
1148 /* Build a gimple assignment to cast VAL to TYPE. Insert the statement
1149 prior to GSI's current position, and return the fresh SSA name. */
1150
1151 static tree
1152 build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
1153 tree type, tree val)
1154 {
1155 tree result = make_ssa_name (type);
1156 gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
1157 gimple_set_location (stmt, loc);
1158 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1159 return result;
1160 }
1161
1162 struct pow_synth_sqrt_info
1163 {
1164 bool *factors;
1165 unsigned int deepest;
1166 unsigned int num_mults;
1167 };
1168
1169 /* Return true iff the real value C can be represented as a
1170 sum of powers of 0.5 up to N. That is:
1171 C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
1172 Record in INFO the various parameters of the synthesis algorithm such
1173 as the factors a[i], the maximum 0.5 power and the number of
1174 multiplications that will be required. */
1175
1176 bool
1177 representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
1178 struct pow_synth_sqrt_info *info)
1179 {
1180 REAL_VALUE_TYPE factor = dconsthalf;
1181 REAL_VALUE_TYPE remainder = c;
1182
1183 info->deepest = 0;
1184 info->num_mults = 0;
1185 memset (info->factors, 0, n * sizeof (bool));
1186
1187 for (unsigned i = 0; i < n; i++)
1188 {
1189 REAL_VALUE_TYPE res;
1190
   1191       /* If something inexact happened, bail out now.  */
1192 if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
1193 return false;
1194
1195 /* We have hit zero. The number is representable as a sum
1196 of powers of 0.5. */
1197 if (real_equal (&res, &dconst0))
1198 {
1199 info->factors[i] = true;
1200 info->deepest = i + 1;
1201 return true;
1202 }
1203 else if (!REAL_VALUE_NEGATIVE (res))
1204 {
1205 remainder = res;
1206 info->factors[i] = true;
1207 info->num_mults++;
1208 }
1209 else
1210 info->factors[i] = false;
1211
1212 real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
1213 }
1214 return false;
1215 }
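
/* Worked example: 0.625 == 0.5**1 + 0.5**3, so for C == 0.625 and
   N >= 3 this returns true with factors = {1, 0, 1}, deepest == 3 and
   num_mults == 1; pow (x, 0.625) can then be synthesized as
   sqrt (x) * sqrt (sqrt (sqrt (x))).  */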
1216
1217 /* Return the tree corresponding to FN being applied
1218 to ARG N times at GSI and LOC.
1219 Look up previous results from CACHE if need be.
   1220    cache[0] should contain just plain ARG, i.e. FN applied to ARG 0 times.  */
1221
1222 static tree
1223 get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
1224 tree fn, location_t loc, tree *cache)
1225 {
1226 tree res = cache[n];
1227 if (!res)
1228 {
1229 tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
1230 res = build_and_insert_call (gsi, loc, fn, prev);
1231 cache[n] = res;
1232 }
1233
1234 return res;
1235 }
1236
1237 /* Print to STREAM the repeated application of function FNAME to ARG
1238 N times. So, for FNAME = "foo", ARG = "x", N = 2 it would print:
1239 "foo (foo (x))". */
1240
1241 static void
1242 print_nested_fn (FILE* stream, const char *fname, const char* arg,
1243 unsigned int n)
1244 {
1245 if (n == 0)
1246 fprintf (stream, "%s", arg);
1247 else
1248 {
1249 fprintf (stream, "%s (", fname);
1250 print_nested_fn (stream, fname, arg, n - 1);
1251 fprintf (stream, ")");
1252 }
1253 }
1254
1255 /* Print to STREAM the fractional sequence of sqrt chains
1256 applied to ARG, described by INFO. Used for the dump file. */
1257
1258 static void
1259 dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
1260 struct pow_synth_sqrt_info *info)
1261 {
1262 for (unsigned int i = 0; i < info->deepest; i++)
1263 {
1264 bool is_set = info->factors[i];
1265 if (is_set)
1266 {
1267 print_nested_fn (stream, "sqrt", arg, i + 1);
1268 if (i != info->deepest - 1)
1269 fprintf (stream, " * ");
1270 }
1271 }
1272 }
1273
1274 /* Print to STREAM a representation of raising ARG to an integer
1275 power N. Used for the dump file. */
1276
1277 static void
1278 dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
1279 {
1280 if (n > 1)
1281 fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
1282 else if (n == 1)
1283 fprintf (stream, "%s", arg);
1284 }
1285
1286 /* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
1287 square roots. Place at GSI and LOC. Limit the maximum depth
1288 of the sqrt chains to MAX_DEPTH. Return the tree holding the
1289 result of the expanded sequence or NULL_TREE if the expansion failed.
1290
1291 This routine assumes that ARG1 is a real number with a fractional part
1292 (the integer exponent case will have been handled earlier in
1293 gimple_expand_builtin_pow).
1294
1295 For ARG1 > 0.0:
1296 * For ARG1 composed of a whole part WHOLE_PART and a fractional part
1297 FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
1298 FRAC_PART == ARG1 - WHOLE_PART:
1299 Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
1300 POW (ARG0, FRAC_PART) is expanded as a product of square root chains
1301 if it can be expressed as such, that is if FRAC_PART satisfies:
1302 FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
1303 where integer a[i] is either 0 or 1.
1304
1305 Example:
1306 POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
1307 --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))
1308
1309 For ARG1 < 0.0 there are two approaches:
1310 * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
1311 is calculated as above.
1312
1313 Example:
1314 POW (x, -5.625) == 1.0 / POW (x, 5.625)
1315 --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))
1316
1317 * (B) : WHOLE_PART := - ceil (abs (ARG1))
1318 FRAC_PART := ARG1 - WHOLE_PART
1319 and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
1320 Example:
1321 POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
1322 --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))
1323
1324 For ARG1 < 0.0 we choose between (A) and (B) depending on
1325 how many multiplications we'd have to do.
1326 So, for the example in (B): POW (x, -5.875), if we were to
1327 follow algorithm (A) we would produce:
1328 1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
1329 which contains more multiplications than approach (B).
1330
1331 Hopefully, this approach will eliminate potentially expensive POW library
1332 calls when unsafe floating point math is enabled and allow the compiler to
1333 further optimise the multiplies, square roots and divides produced by this
1334 function. */
1335
1336 static tree
1337 expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
1338 tree arg0, tree arg1, HOST_WIDE_INT max_depth)
1339 {
1340 tree type = TREE_TYPE (arg0);
1341 machine_mode mode = TYPE_MODE (type);
1342 tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1343 bool one_over = true;
1344
1345 if (!sqrtfn)
1346 return NULL_TREE;
1347
1348 if (TREE_CODE (arg1) != REAL_CST)
1349 return NULL_TREE;
1350
1351 REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);
1352
1353 gcc_assert (max_depth > 0);
1354 tree *cache = XALLOCAVEC (tree, max_depth + 1);
1355
1356 struct pow_synth_sqrt_info synth_info;
1357 synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
1358 synth_info.deepest = 0;
1359 synth_info.num_mults = 0;
1360
1361 bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
1362 REAL_VALUE_TYPE exp = real_value_abs (&exp_init);
1363
1364 /* The whole and fractional parts of exp. */
1365 REAL_VALUE_TYPE whole_part;
1366 REAL_VALUE_TYPE frac_part;
1367
1368 real_floor (&whole_part, mode, &exp);
1369 real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part);
1370
1371
1372 REAL_VALUE_TYPE ceil_whole = dconst0;
1373 REAL_VALUE_TYPE ceil_fract = dconst0;
1374
1375 if (neg_exp)
1376 {
1377 real_ceil (&ceil_whole, mode, &exp);
1378 real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp);
1379 }
1380
1381 if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
1382 return NULL_TREE;
1383
1384 /* Check whether it's more profitable to not use 1.0 / ... */
1385 if (neg_exp)
1386 {
1387 struct pow_synth_sqrt_info alt_synth_info;
1388 alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
1389 alt_synth_info.deepest = 0;
1390 alt_synth_info.num_mults = 0;
1391
1392 if (representable_as_half_series_p (ceil_fract, max_depth,
1393 &alt_synth_info)
1394 && alt_synth_info.deepest <= synth_info.deepest
1395 && alt_synth_info.num_mults < synth_info.num_mults)
1396 {
1397 whole_part = ceil_whole;
1398 frac_part = ceil_fract;
1399 synth_info.deepest = alt_synth_info.deepest;
1400 synth_info.num_mults = alt_synth_info.num_mults;
1401 memcpy (synth_info.factors, alt_synth_info.factors,
1402 (max_depth + 1) * sizeof (bool));
1403 one_over = false;
1404 }
1405 }
1406
1407 HOST_WIDE_INT n = real_to_integer (&whole_part);
1408 REAL_VALUE_TYPE cint;
1409 real_from_integer (&cint, VOIDmode, n, SIGNED);
1410
1411 if (!real_identical (&whole_part, &cint))
1412 return NULL_TREE;
1413
1414 if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
1415 return NULL_TREE;
1416
1417 memset (cache, 0, (max_depth + 1) * sizeof (tree));
1418
1419 tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;
1420
1421 /* Calculate the integer part of the exponent. */
1422 if (n > 1)
1423 {
1424 integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
1425 if (!integer_res)
1426 return NULL_TREE;
1427 }
1428
1429 if (dump_file)
1430 {
1431 char string[64];
1432
1433 real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
1434 fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);
1435
1436 if (neg_exp)
1437 {
1438 if (one_over)
1439 {
1440 fprintf (dump_file, "1.0 / (");
1441 dump_integer_part (dump_file, "x", n);
1442 if (n > 0)
1443 fprintf (dump_file, " * ");
1444 dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1445 fprintf (dump_file, ")");
1446 }
1447 else
1448 {
1449 dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1450 fprintf (dump_file, " / (");
1451 dump_integer_part (dump_file, "x", n);
1452 fprintf (dump_file, ")");
1453 }
1454 }
1455 else
1456 {
1457 dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1458 if (n > 0)
1459 fprintf (dump_file, " * ");
1460 dump_integer_part (dump_file, "x", n);
1461 }
1462
1463 fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
1464 }
1465
1466
1467 tree fract_res = NULL_TREE;
1468 cache[0] = arg0;
1469
1470 /* Calculate the fractional part of the exponent. */
1471 for (unsigned i = 0; i < synth_info.deepest; i++)
1472 {
1473 if (synth_info.factors[i])
1474 {
1475 tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);
1476
1477 if (!fract_res)
1478 fract_res = sqrt_chain;
1479
1480 else
1481 fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1482 fract_res, sqrt_chain);
1483 }
1484 }
1485
1486 tree res = NULL_TREE;
1487
1488 if (neg_exp)
1489 {
1490 if (one_over)
1491 {
1492 if (n > 0)
1493 res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1494 fract_res, integer_res);
1495 else
1496 res = fract_res;
1497
1498 res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
1499 build_real (type, dconst1), res);
1500 }
1501 else
1502 {
1503 res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1504 fract_res, integer_res);
1505 }
1506 }
1507 else
1508 res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1509 fract_res, integer_res);
1510 return res;
1511 }
1512
1513 /* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
1514 with location info LOC. If possible, create an equivalent and
1515 less expensive sequence of statements prior to GSI, and return an
   1516    expression holding the result.  */
1517
1518 static tree
1519 gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
1520 tree arg0, tree arg1)
1521 {
1522 REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
1523 REAL_VALUE_TYPE c2, dconst3;
1524 HOST_WIDE_INT n;
1525 tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
1526 machine_mode mode;
1527 bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
1528 bool hw_sqrt_exists, c_is_int, c2_is_int;
1529
1530 dconst1_4 = dconst1;
1531 SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
1532
1533 /* If the exponent isn't a constant, there's nothing of interest
1534 to be done. */
1535 if (TREE_CODE (arg1) != REAL_CST)
1536 return NULL_TREE;
1537
1538 /* Don't perform the operation if flag_signaling_nans is on
1539 and the operand is a signaling NaN. */
1540 if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))
1541 && ((TREE_CODE (arg0) == REAL_CST
1542 && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0)))
1543 || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1))))
1544 return NULL_TREE;
1545
1546 /* If the exponent is equivalent to an integer, expand to an optimal
1547 multiplication sequence when profitable. */
1548 c = TREE_REAL_CST (arg1);
1549 n = real_to_integer (&c);
1550 real_from_integer (&cint, VOIDmode, n, SIGNED);
1551 c_is_int = real_identical (&c, &cint);
1552
1553 if (c_is_int
1554 && ((n >= -1 && n <= 2)
1555 || (flag_unsafe_math_optimizations
1556 && speed_p
1557 && powi_cost (n) <= POWI_MAX_MULTS)))
1558 return gimple_expand_builtin_powi (gsi, loc, arg0, n);
1559
1560 /* Attempt various optimizations using sqrt and cbrt. */
1561 type = TREE_TYPE (arg0);
1562 mode = TYPE_MODE (type);
1563 sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1564
1565 /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe
1566 unless signed zeros must be maintained. pow(-0,0.5) = +0, while
1567 sqrt(-0) = -0. */
1568 if (sqrtfn
1569 && real_equal (&c, &dconsthalf)
1570 && !HONOR_SIGNED_ZEROS (mode))
1571 return build_and_insert_call (gsi, loc, sqrtfn, arg0);
1572
1573 hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;
1574
1575 /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math
1576 optimizations since 1./3. is not exactly representable. If x
1577 is negative and finite, the correct value of pow(x,1./3.) is
1578 a NaN with the "invalid" exception raised, because the value
1579 of 1./3. actually has an even denominator. The correct value
1580 of cbrt(x) is a negative real value. */
1581 cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
1582 dconst1_3 = real_value_truncate (mode, dconst_third ());
1583
1584 if (flag_unsafe_math_optimizations
1585 && cbrtfn
1586 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1587 && real_equal (&c, &dconst1_3))
1588 return build_and_insert_call (gsi, loc, cbrtfn, arg0);
1589
1590 /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization
1591 if we don't have a hardware sqrt insn. */
1592 dconst1_6 = dconst1_3;
1593 SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);
1594
1595 if (flag_unsafe_math_optimizations
1596 && sqrtfn
1597 && cbrtfn
1598 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1599 && speed_p
1600 && hw_sqrt_exists
1601 && real_equal (&c, &dconst1_6))
1602 {
1603 /* sqrt(x) */
1604 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1605
1606 /* cbrt(sqrt(x)) */
1607 return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
1608 }
1609
1610
1611 /* Attempt to expand the POW as a product of square root chains.
   1612      Expand the 0.25 case even when optimising for size.  */
1613 if (flag_unsafe_math_optimizations
1614 && sqrtfn
1615 && hw_sqrt_exists
1616 && (speed_p || real_equal (&c, &dconst1_4))
1617 && !HONOR_SIGNED_ZEROS (mode))
1618 {
1619 unsigned int max_depth = speed_p
1620 ? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
1621 : 2;
1622
1623 tree expand_with_sqrts
1624 = expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);
1625
1626 if (expand_with_sqrts)
1627 return expand_with_sqrts;
1628 }
1629
1630 real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
1631 n = real_to_integer (&c2);
1632 real_from_integer (&cint, VOIDmode, n, SIGNED);
1633 c2_is_int = real_identical (&c2, &cint);
1634
1635 /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
1636
1637 powi(x, n/3) * powi(cbrt(x), n%3), n > 0;
1638 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0.
1639
1640 Do not calculate the first factor when n/3 = 0. As cbrt(x) is
1641 different from pow(x, 1./3.) due to rounding and behavior with
1642 negative x, we need to constrain this transformation to unsafe
1643 math and positive x or finite math. */
1644 real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
1645 real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
1646 real_round (&c2, mode, &c2);
1647 n = real_to_integer (&c2);
1648 real_from_integer (&cint, VOIDmode, n, SIGNED);
1649 real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
1650 real_convert (&c2, mode, &c2);
1651
1652 if (flag_unsafe_math_optimizations
1653 && cbrtfn
1654 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1655 && real_identical (&c2, &c)
1656 && !c2_is_int
1657 && optimize_function_for_speed_p (cfun)
1658 && powi_cost (n / 3) <= POWI_MAX_MULTS)
1659 {
1660 tree powi_x_ndiv3 = NULL_TREE;
1661
1662 /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not
1663 possible or profitable, give up. Skip the degenerate case when
1664 abs(n) < 3, where the result is always 1. */
1665 if (absu_hwi (n) >= 3)
1666 {
1667 powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
1668 abs_hwi (n / 3));
1669 if (!powi_x_ndiv3)
1670 return NULL_TREE;
1671 }
1672
1673 /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi
1674 as that creates an unnecessary variable. Instead, just produce
1675 either cbrt(x) or cbrt(x) * cbrt(x). */
1676 cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);
1677
1678 if (absu_hwi (n) % 3 == 1)
1679 powi_cbrt_x = cbrt_x;
1680 else
1681 powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1682 cbrt_x, cbrt_x);
1683
1684 /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */
1685 if (absu_hwi (n) < 3)
1686 result = powi_cbrt_x;
1687 else
1688 result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1689 powi_x_ndiv3, powi_cbrt_x);
1690
1691 /* If n is negative, reciprocate the result. */
1692 if (n < 0)
1693 result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1694 build_real (type, dconst1), result);
1695
1696 return result;
1697 }
1698
1699 /* No optimizations succeeded. */
1700 return NULL_TREE;
1701 }
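
/* Worked example for the cbrt path above: for pow (x, 5./3.) we have
   3c == 5, so n == 5 and the result is built as

     powi (x, 5/3) * powi (cbrt (x), 5%3)
     == x * (cbrt (x) * cbrt (x))

   which equals x**(1 + 2/3) == x**(5/3).  */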
1702
1703 /* ARG is the argument to a cabs builtin call in GSI with location info
1704 LOC. Create a sequence of statements prior to GSI that calculates
1705 sqrt(R*R + I*I), where R and I are the real and imaginary components
1706 of ARG, respectively. Return an expression holding the result. */
1707
1708 static tree
1709 gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
1710 {
1711 tree real_part, imag_part, addend1, addend2, sum, result;
1712 tree type = TREE_TYPE (TREE_TYPE (arg));
1713 tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1714 machine_mode mode = TYPE_MODE (type);
1715
1716 if (!flag_unsafe_math_optimizations
1717 || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
1718 || !sqrtfn
1719 || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
1720 return NULL_TREE;
1721
1722 real_part = build_and_insert_ref (gsi, loc, type, "cabs",
1723 REALPART_EXPR, arg);
1724 addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1725 real_part, real_part);
1726 imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
1727 IMAGPART_EXPR, arg);
1728 addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1729 imag_part, imag_part);
1730 sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
1731 result = build_and_insert_call (gsi, loc, sqrtfn, sum);
1732
1733 return result;
1734 }
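
/* E.g. (a sketch), with -funsafe-math-optimizations and a hardware
   sqrt instruction,

     r = cabs (z);

   is expanded inline as

     re = __real__ z;  im = __imag__ z;
     r = sqrt (re * re + im * im);

   avoiding the libm cabs call.  */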
1735
1736 /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
1737 on the SSA_NAME argument of each of them. Also expand powi(x,n) into
1738 an optimal number of multiplies, when n is a constant. */
1739
1740 namespace {
1741
1742 const pass_data pass_data_cse_sincos =
1743 {
1744 GIMPLE_PASS, /* type */
1745 "sincos", /* name */
1746 OPTGROUP_NONE, /* optinfo_flags */
1747 TV_NONE, /* tv_id */
1748 PROP_ssa, /* properties_required */
1749 PROP_gimple_opt_math, /* properties_provided */
1750 0, /* properties_destroyed */
1751 0, /* todo_flags_start */
1752 TODO_update_ssa, /* todo_flags_finish */
1753 };
1754
1755 class pass_cse_sincos : public gimple_opt_pass
1756 {
1757 public:
1758 pass_cse_sincos (gcc::context *ctxt)
1759 : gimple_opt_pass (pass_data_cse_sincos, ctxt)
1760 {}
1761
1762 /* opt_pass methods: */
1763 virtual bool gate (function *)
1764 {
1765 /* We no longer require either sincos or cexp, since powi expansion
1766 piggybacks on this pass. */
1767 return optimize;
1768 }
1769
1770 virtual unsigned int execute (function *);
1771
1772 }; // class pass_cse_sincos
1773
1774 unsigned int
1775 pass_cse_sincos::execute (function *fun)
1776 {
1777 basic_block bb;
1778 bool cfg_changed = false;
1779
1780 calculate_dominance_info (CDI_DOMINATORS);
1781 memset (&sincos_stats, 0, sizeof (sincos_stats));
1782
1783 FOR_EACH_BB_FN (bb, fun)
1784 {
1785 gimple_stmt_iterator gsi;
1786 bool cleanup_eh = false;
1787
1788 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1789 {
1790 gimple *stmt = gsi_stmt (gsi);
1791
1792 /* Only the last stmt in a bb could throw, no need to call
1793 gimple_purge_dead_eh_edges if we change something in the middle
1794 of a basic block. */
1795 cleanup_eh = false;
1796
1797 if (is_gimple_call (stmt)
1798 && gimple_call_lhs (stmt))
1799 {
1800 tree arg, arg0, arg1, result;
1801 HOST_WIDE_INT n;
1802 location_t loc;
1803
1804 switch (gimple_call_combined_fn (stmt))
1805 {
1806 CASE_CFN_COS:
1807 CASE_CFN_SIN:
1808 CASE_CFN_CEXPI:
1809 /* Make sure we have either sincos or cexp. */
1810 if (!targetm.libc_has_function (function_c99_math_complex)
1811 && !targetm.libc_has_function (function_sincos))
1812 break;
1813
1814 arg = gimple_call_arg (stmt, 0);
1815 if (TREE_CODE (arg) == SSA_NAME)
1816 cfg_changed |= execute_cse_sincos_1 (arg);
1817 break;
1818
1819 CASE_CFN_POW:
1820 arg0 = gimple_call_arg (stmt, 0);
1821 arg1 = gimple_call_arg (stmt, 1);
1822
1823 loc = gimple_location (stmt);
1824 result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);
1825
1826 if (result)
1827 {
1828 tree lhs = gimple_get_lhs (stmt);
1829 gassign *new_stmt = gimple_build_assign (lhs, result);
1830 gimple_set_location (new_stmt, loc);
1831 unlink_stmt_vdef (stmt);
1832 gsi_replace (&gsi, new_stmt, true);
1833 cleanup_eh = true;
1834 if (gimple_vdef (stmt))
1835 release_ssa_name (gimple_vdef (stmt));
1836 }
1837 break;
1838
1839 CASE_CFN_POWI:
1840 arg0 = gimple_call_arg (stmt, 0);
1841 arg1 = gimple_call_arg (stmt, 1);
1842 loc = gimple_location (stmt);
1843
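/* powi (-1, n) is 1 for even n and -1 for odd n, so expand it
   branchlessly as (n & 1) ? -1.0 : 1.0 via a BIT_AND_EXPR feeding
   a COND_EXPR.  */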
1844 if (real_minus_onep (arg0))
1845 {
1846 tree t0, t1, cond, one, minus_one;
1847 gassign *stmt;
1848
1849 t0 = TREE_TYPE (arg0);
1850 t1 = TREE_TYPE (arg1);
1851 one = build_real (t0, dconst1);
1852 minus_one = build_real (t0, dconstm1);
1853
1854 cond = make_temp_ssa_name (t1, NULL, "powi_cond");
1855 stmt = gimple_build_assign (cond, BIT_AND_EXPR,
1856 arg1, build_int_cst (t1, 1));
1857 gimple_set_location (stmt, loc);
1858 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1859
1860 result = make_temp_ssa_name (t0, NULL, "powi");
1861 stmt = gimple_build_assign (result, COND_EXPR, cond,
1862 minus_one, one);
1863 gimple_set_location (stmt, loc);
1864 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1865 }
1866 else
1867 {
1868 if (!tree_fits_shwi_p (arg1))
1869 break;
1870
1871 n = tree_to_shwi (arg1);
1872 result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
1873 }
1874
1875 if (result)
1876 {
1877 tree lhs = gimple_get_lhs (stmt);
1878 gassign *new_stmt = gimple_build_assign (lhs, result);
1879 gimple_set_location (new_stmt, loc);
1880 unlink_stmt_vdef (stmt);
1881 gsi_replace (&gsi, new_stmt, true);
1882 cleanup_eh = true;
1883 if (gimple_vdef (stmt))
1884 release_ssa_name (gimple_vdef (stmt));
1885 }
1886 break;
1887
1888 CASE_CFN_CABS:
1889 arg0 = gimple_call_arg (stmt, 0);
1890 loc = gimple_location (stmt);
1891 result = gimple_expand_builtin_cabs (&gsi, loc, arg0);
1892
1893 if (result)
1894 {
1895 tree lhs = gimple_get_lhs (stmt);
1896 gassign *new_stmt = gimple_build_assign (lhs, result);
1897 gimple_set_location (new_stmt, loc);
1898 unlink_stmt_vdef (stmt);
1899 gsi_replace (&gsi, new_stmt, true);
1900 cleanup_eh = true;
1901 if (gimple_vdef (stmt))
1902 release_ssa_name (gimple_vdef (stmt));
1903 }
1904 break;
1905
1906 default:;
1907 }
1908 }
1909 }
1910 if (cleanup_eh)
1911 cfg_changed |= gimple_purge_dead_eh_edges (bb);
1912 }
1913
1914 statistics_counter_event (fun, "sincos statements inserted",
1915 sincos_stats.inserted);
1916
1917 return cfg_changed ? TODO_cleanup_cfg : 0;
1918 }
1919
1920 } // anon namespace
1921
1922 gimple_opt_pass *
1923 make_pass_cse_sincos (gcc::context *ctxt)
1924 {
1925 return new pass_cse_sincos (ctxt);
1926 }
1927
1928 /* A symbolic number is used to detect byte permutation and selection
1929 patterns. Therefore the field N contains an artificial number
1930 consisting of octet sized markers:
1931
1932 0 - target byte has the value 0
1933 FF - target byte has an unknown value (eg. due to sign extension)
1934 1..size - marker value is the target byte index plus one (1 for the lsb).
1935
1936 To detect permutations on memory sources (arrays and structures), a symbolic
1937 number is also associated with a base address (the array or structure the load is
1938 made from), an offset from the base address and a range which gives the
1939 difference between the highest and lowest accessed memory location to make
1940 such a symbolic number. The range is thus different from size, which reflects
1941 the size of the type of the current expression. Note that for a non-memory
1942 source, range holds the same value as size.
1943
1944 For instance, for an array char a[], (short) a[0] | (short) a[3] would have
1945 a size of 2 but a range of 4 while (short) a[0] | ((short) a[0] << 1) would
1946 still have a size of 2 but this time a range of 1. */
1947
1948 struct symbolic_number {
1949 uint64_t n;
1950 tree type;
1951 tree base_addr;
1952 tree offset;
1953 HOST_WIDE_INT bytepos;
1954 tree alias_set;
1955 tree vuse;
1956 unsigned HOST_WIDE_INT range;
1957 };
1958
1959 #define BITS_PER_MARKER 8
1960 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
1961 #define MARKER_BYTE_UNKNOWN MARKER_MASK
1962 #define HEAD_MARKER(n, size) \
1963 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
1964
1965 /* The number which the find_bswap_or_nop_1 result should match in
1966 order to have a nop. The number is masked according to the size of
1967 the symbolic number before using it. */
1968 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
1969 (uint64_t)0x08070605 << 32 | 0x04030201)
1970
1971 /* The number which the find_bswap_or_nop_1 result should match in
1972 order to have a byte swap. The number is masked according to the
1973 size of the symbolic number before using it. */
1974 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
1975 (uint64_t)0x01020304 << 32 | 0x05060708)
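
/* For example, for a 4-byte symbolic number, CMPNOP masked to 4 bytes
   is 0x04030201 (every byte keeps its position), while CMPXCHG shifted
   down to 4 bytes is 0x01020304 (the byte order is reversed), i.e. a
   32-bit byte swap. */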
1976
1977 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
1978 number N. Return false if the requested operation is not permitted
1979 on a symbolic number. */
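
/* A worked example, assuming BITS_PER_MARKER == 8: applying
   LSHIFT_EXPR by 8 bits to a 4-byte symbolic number 0x04030201
   yields 0x03020100 after the final masking, i.e. the lowest target
   byte is now known to be zero. */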
1980
1981 static inline bool
1982 do_shift_rotate (enum tree_code code,
1983 struct symbolic_number *n,
1984 int count)
1985 {
1986 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
1987 unsigned head_marker;
1988
1989 if (count % BITS_PER_UNIT != 0)
1990 return false;
1991 count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
1992
1993 /* Zero out the extra bits of N in order to avoid them being shifted
1994 into the significant bits. */
1995 if (size < 64 / BITS_PER_MARKER)
1996 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
1997
1998 switch (code)
1999 {
2000 case LSHIFT_EXPR:
2001 n->n <<= count;
2002 break;
2003 case RSHIFT_EXPR:
2004 head_marker = HEAD_MARKER (n->n, size);
2005 n->n >>= count;
2006 /* Arithmetic shift of signed type: result is dependent on the value. */
2007 if (!TYPE_UNSIGNED (n->type) && head_marker)
2008 for (i = 0; i < count / BITS_PER_MARKER; i++)
2009 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
2010 << ((size - 1 - i) * BITS_PER_MARKER);
2011 break;
2012 case LROTATE_EXPR:
2013 n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
2014 break;
2015 case RROTATE_EXPR:
2016 n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
2017 break;
2018 default:
2019 return false;
2020 }
2021 /* Zero unused bits for size. */
2022 if (size < 64 / BITS_PER_MARKER)
2023 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2024 return true;
2025 }
2026
2027 /* Perform sanity checking for the symbolic number N and the gimple
2028 statement STMT. */
2029
2030 static inline bool
2031 verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
2032 {
2033 tree lhs_type;
2034
2035 lhs_type = gimple_expr_type (stmt);
2036
2037 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
2038 return false;
2039
2040 if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
2041 return false;
2042
2043 return true;
2044 }
2045
2046 /* Initialize the symbolic number N for the bswap pass from the base element
2047 SRC manipulated by the bitwise OR expression. */
2048
2049 static bool
2050 init_symbolic_number (struct symbolic_number *n, tree src)
2051 {
2052 int size;
2053
2054 if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
2055 return false;
2056
2057 n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
2058
2059 /* Set up the symbolic number N by setting each byte to a value between 1 and
2060 the byte size of SRC. The highest order byte is set to that size and the
2061 lowest order byte to 1. */
2062 n->type = TREE_TYPE (src);
2063 size = TYPE_PRECISION (n->type);
2064 if (size % BITS_PER_UNIT != 0)
2065 return false;
2066 size /= BITS_PER_UNIT;
2067 if (size > 64 / BITS_PER_MARKER)
2068 return false;
2069 n->range = size;
2070 n->n = CMPNOP;
2071
2072 if (size < 64 / BITS_PER_MARKER)
2073 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2074
2075 return true;
2076 }
2077
2078 /* Check if STMT might be a byte swap or a nop from a memory source and return
2079 the answer. If so, REF is that memory source and the base of the memory area
2080 accessed and the offset of the access from that base are recorded in N. */
2081
2082 bool
2083 find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
2084 {
2085 /* Leaf node is an array or component ref. Memorize its base and
2086 offset from base to compare to other such leaf nodes. */
2087 HOST_WIDE_INT bitsize, bitpos;
2088 machine_mode mode;
2089 int unsignedp, reversep, volatilep;
2090 tree offset, base_addr;
2091
2092 /* Not prepared to handle PDP endian. */
2093 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
2094 return false;
2095
2096 if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
2097 return false;
2098
2099 base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
2100 &unsignedp, &reversep, &volatilep);
2101
2102 if (TREE_CODE (base_addr) == MEM_REF)
2103 {
2104 offset_int bit_offset = 0;
2105 tree off = TREE_OPERAND (base_addr, 1);
2106
2107 if (!integer_zerop (off))
2108 {
2109 offset_int boff, coff = mem_ref_offset (base_addr);
2110 boff = coff << LOG2_BITS_PER_UNIT;
2111 bit_offset += boff;
2112 }
2113
2114 base_addr = TREE_OPERAND (base_addr, 0);
2115
2116 /* Avoid returning a negative bitpos as this may wreak havoc later. */
2117 if (wi::neg_p (bit_offset))
2118 {
2119 offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
2120 offset_int tem = bit_offset.and_not (mask);
2121 /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
2122 Subtract it from BIT_OFFSET and add it (scaled) to OFFSET. */
2123 bit_offset -= tem;
2124 tem >>= LOG2_BITS_PER_UNIT;
2125 if (offset)
2126 offset = size_binop (PLUS_EXPR, offset,
2127 wide_int_to_tree (sizetype, tem));
2128 else
2129 offset = wide_int_to_tree (sizetype, tem);
2130 }
2131
2132 bitpos += bit_offset.to_shwi ();
2133 }
2134
2135 if (bitpos % BITS_PER_UNIT)
2136 return false;
2137 if (bitsize % BITS_PER_UNIT)
2138 return false;
2139 if (reversep)
2140 return false;
2141
2142 if (!init_symbolic_number (n, ref))
2143 return false;
2144 n->base_addr = base_addr;
2145 n->offset = offset;
2146 n->bytepos = bitpos / BITS_PER_UNIT;
2147 n->alias_set = reference_alias_ptr_type (ref);
2148 n->vuse = gimple_vuse (stmt);
2149 return true;
2150 }
2151
2152 /* Compute the symbolic number N representing the result of a bitwise OR on
2153 the two symbolic numbers N1 and N2, whose source statements are respectively
2154 SOURCE_STMT1 and SOURCE_STMT2. */
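
/* As a sketch: for unsigned char a[2] on a little-endian target,
   merging the symbolic numbers of (short) a[0] and (short) a[1] << 8
   yields a symbolic number describing a single 2-byte load at a[0]
   in target byte order. */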
2155
2156 static gimple *
2157 perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
2158 gimple *source_stmt2, struct symbolic_number *n2,
2159 struct symbolic_number *n)
2160 {
2161 int i, size;
2162 uint64_t mask;
2163 gimple *source_stmt;
2164 struct symbolic_number *n_start;
2165
2166 tree rhs1 = gimple_assign_rhs1 (source_stmt1);
2167 if (TREE_CODE (rhs1) == BIT_FIELD_REF
2168 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
2169 rhs1 = TREE_OPERAND (rhs1, 0);
2170 tree rhs2 = gimple_assign_rhs1 (source_stmt2);
2171 if (TREE_CODE (rhs2) == BIT_FIELD_REF
2172 && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
2173 rhs2 = TREE_OPERAND (rhs2, 0);
2174
2175 /* Sources are different, cancel bswap if they are not memory locations with
2176 the same base (array, structure, ...). */
2177 if (rhs1 != rhs2)
2178 {
2179 uint64_t inc;
2180 HOST_WIDE_INT start_sub, end_sub, end1, end2, end;
2181 struct symbolic_number *toinc_n_ptr, *n_end;
2182
2183 if (!n1->base_addr || !n2->base_addr
2184 || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
2185 return NULL;
2186
2187 if (!n1->offset != !n2->offset
2188 || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
2189 return NULL;
2190
2191 if (n1->bytepos < n2->bytepos)
2192 {
2193 n_start = n1;
2194 start_sub = n2->bytepos - n1->bytepos;
2195 source_stmt = source_stmt1;
2196 }
2197 else
2198 {
2199 n_start = n2;
2200 start_sub = n1->bytepos - n2->bytepos;
2201 source_stmt = source_stmt2;
2202 }
2203
2204 /* Find the highest address at which a load is performed and
2205 compute related info. */
2206 end1 = n1->bytepos + (n1->range - 1);
2207 end2 = n2->bytepos + (n2->range - 1);
2208 if (end1 < end2)
2209 {
2210 end = end2;
2211 end_sub = end2 - end1;
2212 }
2213 else
2214 {
2215 end = end1;
2216 end_sub = end1 - end2;
2217 }
2218 n_end = (end2 > end1) ? n2 : n1;
2219
2220 /* Find symbolic number whose lsb is the most significant. */
2221 if (BYTES_BIG_ENDIAN)
2222 toinc_n_ptr = (n_end == n1) ? n2 : n1;
2223 else
2224 toinc_n_ptr = (n_start == n1) ? n2 : n1;
2225
2226 n->range = end - n_start->bytepos + 1;
2227
2228 /* Check that the range of memory covered can be represented by
2229 a symbolic number. */
2230 if (n->range > 64 / BITS_PER_MARKER)
2231 return NULL;
2232
2233 /* Reinterpret the byte marks in the symbolic number holding the value of
2234 higher weight, according to target endianness. */
2235 inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
2236 size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
2237 for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
2238 {
2239 unsigned marker
2240 = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
2241 if (marker && marker != MARKER_BYTE_UNKNOWN)
2242 toinc_n_ptr->n += inc;
2243 }
2244 }
2245 else
2246 {
2247 n->range = n1->range;
2248 n_start = n1;
2249 source_stmt = source_stmt1;
2250 }
2251
2252 if (!n1->alias_set
2253 || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
2254 n->alias_set = n1->alias_set;
2255 else
2256 n->alias_set = ptr_type_node;
2257 n->vuse = n_start->vuse;
2258 n->base_addr = n_start->base_addr;
2259 n->offset = n_start->offset;
2260 n->bytepos = n_start->bytepos;
2261 n->type = n_start->type;
2262 size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2263
2264 for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
2265 {
2266 uint64_t masked1, masked2;
2267
2268 masked1 = n1->n & mask;
2269 masked2 = n2->n & mask;
2270 if (masked1 && masked2 && masked1 != masked2)
2271 return NULL;
2272 }
2273 n->n = n1->n | n2->n;
2274
2275 return source_stmt;
2276 }
2277
2278 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
2279 the operation given by the rhs of STMT on the result. If the operation
2280 could successfully be executed, the function returns a gimple stmt whose
2281 rhs's first tree is the expression of the source operand, and NULL
2282 otherwise. */
2283
2284 static gimple *
2285 find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
2286 {
2287 enum tree_code code;
2288 tree rhs1, rhs2 = NULL;
2289 gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
2290 enum gimple_rhs_class rhs_class;
2291
2292 if (!limit || !is_gimple_assign (stmt))
2293 return NULL;
2294
2295 rhs1 = gimple_assign_rhs1 (stmt);
2296
2297 if (find_bswap_or_nop_load (stmt, rhs1, n))
2298 return stmt;
2299
2300 /* Handle BIT_FIELD_REF. */
2301 if (TREE_CODE (rhs1) == BIT_FIELD_REF
2302 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
2303 {
2304 unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
2305 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
2306 if (bitpos % BITS_PER_UNIT == 0
2307 && bitsize % BITS_PER_UNIT == 0
2308 && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
2309 {
2310 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
2311 if (BYTES_BIG_ENDIAN)
2312 bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;
2313
2314 /* Shift. */
2315 if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
2316 return NULL;
2317
2318 /* Mask. */
2319 uint64_t mask = 0;
2320 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
2321 for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
2322 i++, tmp <<= BITS_PER_UNIT)
2323 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
2324 n->n &= mask;
2325
2326 /* Convert. */
2327 n->type = TREE_TYPE (rhs1);
2328 if (!n->base_addr)
2329 n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2330
2331 return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
2332 }
2333
2334 return NULL;
2335 }
2336
2337 if (TREE_CODE (rhs1) != SSA_NAME)
2338 return NULL;
2339
2340 code = gimple_assign_rhs_code (stmt);
2341 rhs_class = gimple_assign_rhs_class (stmt);
2342 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2343
2344 if (rhs_class == GIMPLE_BINARY_RHS)
2345 rhs2 = gimple_assign_rhs2 (stmt);
2346
2347 /* Handle unary rhs and binary rhs with integer constants as second
2348 operand. */
2349
2350 if (rhs_class == GIMPLE_UNARY_RHS
2351 || (rhs_class == GIMPLE_BINARY_RHS
2352 && TREE_CODE (rhs2) == INTEGER_CST))
2353 {
2354 if (code != BIT_AND_EXPR
2355 && code != LSHIFT_EXPR
2356 && code != RSHIFT_EXPR
2357 && code != LROTATE_EXPR
2358 && code != RROTATE_EXPR
2359 && !CONVERT_EXPR_CODE_P (code))
2360 return NULL;
2361
2362 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
2363
2364 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
2365 we have to initialize the symbolic number. */
2366 if (!source_stmt1)
2367 {
2368 if (gimple_assign_load_p (stmt)
2369 || !init_symbolic_number (n, rhs1))
2370 return NULL;
2371 source_stmt1 = stmt;
2372 }
2373
2374 switch (code)
2375 {
2376 case BIT_AND_EXPR:
2377 {
2378 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2379 uint64_t val = int_cst_value (rhs2), mask = 0;
2380 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
2381
2382 /* Only constants masking full bytes are allowed. */
2383 for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
2384 if ((val & tmp) != 0 && (val & tmp) != tmp)
2385 return NULL;
2386 else if (val & tmp)
2387 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
2388
2389 n->n &= mask;
2390 }
2391 break;
2392 case LSHIFT_EXPR:
2393 case RSHIFT_EXPR:
2394 case LROTATE_EXPR:
2395 case RROTATE_EXPR:
2396 if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
2397 return NULL;
2398 break;
2399 CASE_CONVERT:
2400 {
2401 int i, type_size, old_type_size;
2402 tree type;
2403
2404 type = gimple_expr_type (stmt);
2405 type_size = TYPE_PRECISION (type);
2406 if (type_size % BITS_PER_UNIT != 0)
2407 return NULL;
2408 type_size /= BITS_PER_UNIT;
2409 if (type_size > 64 / BITS_PER_MARKER)
2410 return NULL;
2411
2412 /* Sign extension: result is dependent on the value. */
2413 old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2414 if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
2415 && HEAD_MARKER (n->n, old_type_size))
2416 for (i = 0; i < type_size - old_type_size; i++)
2417 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
2418 << ((type_size - 1 - i) * BITS_PER_MARKER);
2419
2420 if (type_size < 64 / BITS_PER_MARKER)
2421 {
2422 /* If STMT casts to a smaller type, mask out the bits not
2423 belonging to the target type. */
2424 n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
2425 }
2426 n->type = type;
2427 if (!n->base_addr)
2428 n->range = type_size;
2429 }
2430 break;
2431 default:
2432 return NULL;
2433 };
2434 return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
2435 }
2436
2437 /* Handle binary rhs. */
2438
2439 if (rhs_class == GIMPLE_BINARY_RHS)
2440 {
2441 struct symbolic_number n1, n2;
2442 gimple *source_stmt, *source_stmt2;
2443
2444 if (code != BIT_IOR_EXPR)
2445 return NULL;
2446
2447 if (TREE_CODE (rhs2) != SSA_NAME)
2448 return NULL;
2449
2450 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2451
2452 switch (code)
2453 {
2454 case BIT_IOR_EXPR:
2455 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
2456
2457 if (!source_stmt1)
2458 return NULL;
2459
2460 source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
2461
2462 if (!source_stmt2)
2463 return NULL;
2464
2465 if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
2466 return NULL;
2467
2468 if (!n1.vuse != !n2.vuse
2469 || (n1.vuse && !operand_equal_p (n1.vuse, n2.vuse, 0)))
2470 return NULL;
2471
2472 source_stmt
2473 = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
2474
2475 if (!source_stmt)
2476 return NULL;
2477
2478 if (!verify_symbolic_number_p (n, stmt))
2479 return NULL;
2480
2481 break;
2482 default:
2483 return NULL;
2484 }
2485 return source_stmt;
2486 }
2487 return NULL;
2488 }
2489
2490 /* Check if STMT completes a bswap implementation or a read in a given
2491 endianness consisting of ORs, SHIFTs and ANDs, and set *BSWAP
2492 accordingly. It also sets N to represent the kind of operations
2493 performed: the size of the resulting expression and whether it works on
2494 a memory source, and if so its alias set and vuse. Finally, the
2495 function returns a stmt whose rhs's first tree is the source
2496 expression. */
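
/* As an illustration, the kind of source-level pattern recognized
   here (the pass actually matches the GIMPLE form) is

     y = ((x & 0x000000ff) << 24)
         | ((x & 0x0000ff00) << 8)
         | ((x & 0x00ff0000) >> 8)
         | ((x & 0xff000000) >> 24);

   for a 32-bit unsigned x, which sets *BSWAP to true so the caller
   can replace the computation by __builtin_bswap32 (x). */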
2497
2498 static gimple *
2499 find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
2500 {
2501 /* The number which the find_bswap_or_nop_1 result should match in order
2502 to have a full byte swap. The number is shifted to the right
2503 according to the size of the symbolic number before using it. */
2504 uint64_t cmpxchg = CMPXCHG;
2505 uint64_t cmpnop = CMPNOP;
2506
2507 gimple *source_stmt;
2508 int limit;
2509
2510 /* The last parameter determines the search depth limit. It usually
2511 correlates directly to the number n of bytes to be touched. We
2512 increase that number by log2(n) + 1 here in order to also
2513 cover signed -> unsigned conversions of the src operand as can be seen
2514 in libgcc, and for an initial shift/and operation of the src operand. */
2515 limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
2516 limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
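/* E.g. for a 4-byte expression this gives limit = 4 + 1 + 2 = 7. */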
2517 source_stmt = find_bswap_or_nop_1 (stmt, n, limit);
2518
2519 if (!source_stmt)
2520 return NULL;
2521
2522 /* Find real size of result (highest non-zero byte). */
2523 if (n->base_addr)
2524 {
2525 unsigned HOST_WIDE_INT rsize;
2526 uint64_t tmpn;
2527
2528 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
2529 if (BYTES_BIG_ENDIAN && n->range != rsize)
2530 /* This implies an offset, which is currently not handled by
2531 bswap_replace. */
2532 return NULL;
2533 n->range = rsize;
2534 }
2535
2536 /* Zero out the extra bits of N and CMP*. */
2537 if (n->range < (int) sizeof (int64_t))
2538 {
2539 uint64_t mask;
2540
2541 mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
2542 cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
2543 cmpnop &= mask;
2544 }
2545
2546 /* A complete byte swap should make the symbolic number start with the
2547 largest digit in the highest order byte. An unchanged symbolic number
2548 indicates a read with the same endianness as the target architecture. */
2549 if (n->n == cmpnop)
2550 *bswap = false;
2551 else if (n->n == cmpxchg)
2552 *bswap = true;
2553 else
2554 return NULL;
2555
2556 /* Useless bit manipulation performed by the code. */
2557 if (!n->base_addr && n->n == cmpnop)
2558 return NULL;
2559
2560 n->range *= BITS_PER_UNIT;
2561 return source_stmt;
2562 }
2563
2564 namespace {
2565
2566 const pass_data pass_data_optimize_bswap =
2567 {
2568 GIMPLE_PASS, /* type */
2569 "bswap", /* name */
2570 OPTGROUP_NONE, /* optinfo_flags */
2571 TV_NONE, /* tv_id */
2572 PROP_ssa, /* properties_required */
2573 0, /* properties_provided */
2574 0, /* properties_destroyed */
2575 0, /* todo_flags_start */
2576 0, /* todo_flags_finish */
2577 };
2578
2579 class pass_optimize_bswap : public gimple_opt_pass
2580 {
2581 public:
2582 pass_optimize_bswap (gcc::context *ctxt)
2583 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
2584 {}
2585
2586 /* opt_pass methods: */
2587 virtual bool gate (function *)
2588 {
2589 return flag_expensive_optimizations && optimize;
2590 }
2591
2592 virtual unsigned int execute (function *);
2593
2594 }; // class pass_optimize_bswap
2595
2596 /* Perform the bswap optimization: replace the expression computed in the rhs
2597 of CUR_STMT by an equivalent bswap, load or load + bswap expression.
2598 Which of these alternatives replaces the rhs is given by N->base_addr (non
2599 null if a load is needed) and BSWAP. The type, VUSE and alias set of the
2600 load to perform are also given in N, while the bswap builtin invocation is
2601 given in FNDECL. Finally, if a load is involved, SRC_STMT refers to one of
2602 the load statements used to construct the rhs in CUR_STMT, and N->range
2603 gives the size of the rhs expression for maintaining some statistics.
2604
2605 Note that if the replacement involves a load, CUR_STMT is moved just after
2606 SRC_STMT to do the load with the same VUSE, which can lead to CUR_STMT
2607 changing basic block. */
2608
2609 static bool
2610 bswap_replace (gimple *cur_stmt, gimple *src_stmt, tree fndecl,
2611 tree bswap_type, tree load_type, struct symbolic_number *n,
2612 bool bswap)
2613 {
2614 gimple_stmt_iterator gsi;
2615 tree src, tmp, tgt;
2616 gimple *bswap_stmt;
2617
2618 gsi = gsi_for_stmt (cur_stmt);
2619 src = gimple_assign_rhs1 (src_stmt);
2620 tgt = gimple_assign_lhs (cur_stmt);
2621
2622 /* Need to load the value from memory first. */
2623 if (n->base_addr)
2624 {
2625 gimple_stmt_iterator gsi_ins = gsi_for_stmt (src_stmt);
2626 tree addr_expr, addr_tmp, val_expr, val_tmp;
2627 tree load_offset_ptr, aligned_load_type;
2628 gimple *addr_stmt, *load_stmt;
2629 unsigned align;
2630 HOST_WIDE_INT load_offset = 0;
2631
2632 align = get_object_alignment (src);
2633 /* If the new access is smaller than the original one, we need
2634 to perform big endian adjustment. */
2635 if (BYTES_BIG_ENDIAN)
2636 {
2637 HOST_WIDE_INT bitsize, bitpos;
2638 machine_mode mode;
2639 int unsignedp, reversep, volatilep;
2640 tree offset;
2641
2642 get_inner_reference (src, &bitsize, &bitpos, &offset, &mode,
2643 &unsignedp, &reversep, &volatilep);
2644 if (n->range < (unsigned HOST_WIDE_INT) bitsize)
2645 {
2646 load_offset = (bitsize - n->range) / BITS_PER_UNIT;
2647 unsigned HOST_WIDE_INT l
2648 = (load_offset * BITS_PER_UNIT) & (align - 1);
2649 if (l)
2650 align = least_bit_hwi (l);
2651 }
2652 }
2653
2654 if (bswap
2655 && align < GET_MODE_ALIGNMENT (TYPE_MODE (load_type))
2656 && SLOW_UNALIGNED_ACCESS (TYPE_MODE (load_type), align))
2657 return false;
2658
2659 /* Move cur_stmt just before one of the original loads to ensure
2660 it has the same VUSE. See PR61517 for what could
2661 go wrong. */
2662 if (gimple_bb (cur_stmt) != gimple_bb (src_stmt))
2663 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
2664 gsi_move_before (&gsi, &gsi_ins);
2665 gsi = gsi_for_stmt (cur_stmt);
2666
2667 /* Compute address to load from and cast according to the size
2668 of the load. */
2669 addr_expr = build_fold_addr_expr (unshare_expr (src));
2670 if (is_gimple_mem_ref_addr (addr_expr))
2671 addr_tmp = addr_expr;
2672 else
2673 {
2674 addr_tmp = make_temp_ssa_name (TREE_TYPE (addr_expr), NULL,
2675 "load_src");
2676 addr_stmt = gimple_build_assign (addr_tmp, addr_expr);
2677 gsi_insert_before (&gsi, addr_stmt, GSI_SAME_STMT);
2678 }
2679
2680 /* Perform the load. */
2681 aligned_load_type = load_type;
2682 if (align < TYPE_ALIGN (load_type))
2683 aligned_load_type = build_aligned_type (load_type, align);
2684 load_offset_ptr = build_int_cst (n->alias_set, load_offset);
2685 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
2686 load_offset_ptr);
2687
2688 if (!bswap)
2689 {
2690 if (n->range == 16)
2691 nop_stats.found_16bit++;
2692 else if (n->range == 32)
2693 nop_stats.found_32bit++;
2694 else
2695 {
2696 gcc_assert (n->range == 64);
2697 nop_stats.found_64bit++;
2698 }
2699
2700 /* Convert the result of the load if necessary. */
2701 if (!useless_type_conversion_p (TREE_TYPE (tgt), load_type))
2702 {
2703 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
2704 "load_dst");
2705 load_stmt = gimple_build_assign (val_tmp, val_expr);
2706 gimple_set_vuse (load_stmt, n->vuse);
2707 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2708 gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
2709 }
2710 else
2711 {
2712 gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
2713 gimple_set_vuse (cur_stmt, n->vuse);
2714 }
2715 update_stmt (cur_stmt);
2716
2717 if (dump_file)
2718 {
2719 fprintf (dump_file,
2720 "%d bit load in target endianness found at: ",
2721 (int) n->range);
2722 print_gimple_stmt (dump_file, cur_stmt, 0, 0);
2723 }
2724 return true;
2725 }
2726 else
2727 {
2728 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
2729 load_stmt = gimple_build_assign (val_tmp, val_expr);
2730 gimple_set_vuse (load_stmt, n->vuse);
2731 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2732 }
2733 src = val_tmp;
2734 }
2735 else if (TREE_CODE (src) == BIT_FIELD_REF)
2736 src = TREE_OPERAND (src, 0);
2737
2738 if (n->range == 16)
2739 bswap_stats.found_16bit++;
2740 else if (n->range == 32)
2741 bswap_stats.found_32bit++;
2742 else
2743 {
2744 gcc_assert (n->range == 64);
2745 bswap_stats.found_64bit++;
2746 }
2747
2748 tmp = src;
2749
2750 /* Convert the src expression if necessary. */
2751 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
2752 {
2753 gimple *convert_stmt;
2754
2755 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
2756 convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
2757 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
2758 }
2759
2760 /* The canonical form for a 16-bit bswap is a rotate expression. Only 16-bit
2761 values are handled this way, as rotating a 2N-bit value by N bits is
2762 generally not equivalent to a bswap. Consider for instance 0x01020304 r>> 16,
2763 which gives 0x03040102 while a bswap of that value is 0x04030201. */
2764 if (bswap && n->range == 16)
2765 {
2766 tree count = build_int_cst (NULL, BITS_PER_UNIT);
2767 src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
2768 bswap_stmt = gimple_build_assign (NULL, src);
2769 }
2770 else
2771 bswap_stmt = gimple_build_call (fndecl, 1, tmp);
2772
2773 tmp = tgt;
2774
2775 /* Convert the result if necessary. */
2776 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
2777 {
2778 gimple *convert_stmt;
2779
2780 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
2781 convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
2782 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
2783 }
2784
2785 gimple_set_lhs (bswap_stmt, tmp);
2786
2787 if (dump_file)
2788 {
2789 fprintf (dump_file, "%d bit bswap implementation found at: ",
2790 (int) n->range);
2791 print_gimple_stmt (dump_file, cur_stmt, 0, 0);
2792 }
2793
2794 gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
2795 gsi_remove (&gsi, true);
2796 return true;
2797 }
2798
2799 /* Find manual byte swap implementations as well as loads in a given
2800 endianness. Byte swaps are turned into a bswap builtin invocation,
2801 while endian loads are converted to a bswap builtin invocation or a
2802 simple load according to the target endianness. */
2803
2804 unsigned int
2805 pass_optimize_bswap::execute (function *fun)
2806 {
2807 basic_block bb;
2808 bool bswap32_p, bswap64_p;
2809 bool changed = false;
2810 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
2811
2812 if (BITS_PER_UNIT != 8)
2813 return 0;
2814
2815 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2816 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
2817 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2818 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
2819 || (bswap32_p && word_mode == SImode)));
2820
2821 /* Determine the argument type of the builtins. The code later on
2822 assumes that the return and argument type are the same. */
2823 if (bswap32_p)
2824 {
2825 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2826 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2827 }
2828
2829 if (bswap64_p)
2830 {
2831 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2832 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2833 }
2834
2835 memset (&nop_stats, 0, sizeof (nop_stats));
2836 memset (&bswap_stats, 0, sizeof (bswap_stats));
2837
2838 FOR_EACH_BB_FN (bb, fun)
2839 {
2840 gimple_stmt_iterator gsi;
2841
2842 /* We do a reverse scan for bswap patterns to make sure we get the
2843 widest match. As bswap pattern matching doesn't handle previously
2844 inserted smaller bswap replacements as sub-patterns, the wider
2845 variant wouldn't be detected. */
2846 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
2847 {
2848 gimple *src_stmt, *cur_stmt = gsi_stmt (gsi);
2849 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
2850 enum tree_code code;
2851 struct symbolic_number n;
2852 bool bswap;
2853
2854 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
2855 might be moved to a different basic block by bswap_replace and gsi
2856 must not point to it if that's the case. Moving the gsi_prev
2857 here makes sure that gsi points to the statement previous to
2858 cur_stmt while still making sure that all statements are
2859 considered in this basic block. */
2860 gsi_prev (&gsi);
2861
2862 if (!is_gimple_assign (cur_stmt))
2863 continue;
2864
2865 code = gimple_assign_rhs_code (cur_stmt);
2866 switch (code)
2867 {
2868 case LROTATE_EXPR:
2869 case RROTATE_EXPR:
2870 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
2871 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
2872 % BITS_PER_UNIT)
2873 continue;
2874 /* Fall through. */
2875 case BIT_IOR_EXPR:
2876 break;
2877 default:
2878 continue;
2879 }
2880
2881 src_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
2882
2883 if (!src_stmt)
2884 continue;
2885
2886 switch (n.range)
2887 {
2888 case 16:
2889 /* Already in canonical form, nothing to do. */
2890 if (code == LROTATE_EXPR || code == RROTATE_EXPR)
2891 continue;
2892 load_type = bswap_type = uint16_type_node;
2893 break;
2894 case 32:
2895 load_type = uint32_type_node;
2896 if (bswap32_p)
2897 {
2898 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2899 bswap_type = bswap32_type;
2900 }
2901 break;
2902 case 64:
2903 load_type = uint64_type_node;
2904 if (bswap64_p)
2905 {
2906 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2907 bswap_type = bswap64_type;
2908 }
2909 break;
2910 default:
2911 continue;
2912 }
2913
2914 if (bswap && !fndecl && n.range != 16)
2915 continue;
2916
2917 if (bswap_replace (cur_stmt, src_stmt, fndecl, bswap_type, load_type,
2918 &n, bswap))
2919 changed = true;
2920 }
2921 }
2922
2923 statistics_counter_event (fun, "16-bit nop implementations found",
2924 nop_stats.found_16bit);
2925 statistics_counter_event (fun, "32-bit nop implementations found",
2926 nop_stats.found_32bit);
2927 statistics_counter_event (fun, "64-bit nop implementations found",
2928 nop_stats.found_64bit);
2929 statistics_counter_event (fun, "16-bit bswap implementations found",
2930 bswap_stats.found_16bit);
2931 statistics_counter_event (fun, "32-bit bswap implementations found",
2932 bswap_stats.found_32bit);
2933 statistics_counter_event (fun, "64-bit bswap implementations found",
2934 bswap_stats.found_64bit);
2935
2936 return (changed ? TODO_update_ssa : 0);
2937 }
2938
2939 } // anon namespace
2940
2941 gimple_opt_pass *
2942 make_pass_optimize_bswap (gcc::context *ctxt)
2943 {
2944 return new pass_optimize_bswap (ctxt);
2945 }
2946
2947 /* Return true if stmt is a type conversion operation that can be stripped
2948 when used in a widening multiply operation. */
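
/* For example (a sketch): with a 64-bit multiply result, an operand
   defined by the conversion tmp = (unsigned int) u, where u is
   unsigned short, is strippable -- the inner type is unsigned and
   narrower than the intermediate one, so the widening multiply can
   extend u directly. */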
2949 static bool
2950 widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
2951 {
2952 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
2953
2954 if (TREE_CODE (result_type) == INTEGER_TYPE)
2955 {
2956 tree op_type;
2957 tree inner_op_type;
2958
2959 if (!CONVERT_EXPR_CODE_P (rhs_code))
2960 return false;
2961
2962 op_type = TREE_TYPE (gimple_assign_lhs (stmt));
2963
2964 /* If the type of OP has the same precision as the result, then
2965 we can strip this conversion. The multiply operation will be
2966 selected to create the correct extension as a by-product. */
2967 if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
2968 return true;
2969
2970 /* We can also strip a conversion if it preserves the signed-ness of
2971 the operation and doesn't narrow the range. */
2972 inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
2973
2974 /* If the inner-most type is unsigned, then we can strip any
2975 intermediate widening operation. If it's signed, then the
2976 intermediate widening operation must also be signed. */
2977 if ((TYPE_UNSIGNED (inner_op_type)
2978 || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
2979 && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
2980 return true;
2981
2982 return false;
2983 }
2984
2985 return rhs_code == FIXED_CONVERT_EXPR;
2986 }
2987
2988 /* Return true if RHS is a suitable operand for a widening multiplication,
2989 assuming a target type of TYPE.
2990 There are two cases:
2991
2992 - RHS makes some value at least twice as wide. Store that value
2993 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
2994
2995 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
2996 but leave *TYPE_OUT untouched. */
2997
2998 static bool
2999 is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
3000 tree *new_rhs_out)
3001 {
3002 gimple *stmt;
3003 tree type1, rhs1;
3004
3005 if (TREE_CODE (rhs) == SSA_NAME)
3006 {
3007 stmt = SSA_NAME_DEF_STMT (rhs);
3008 if (is_gimple_assign (stmt))
3009 {
3010 if (! widening_mult_conversion_strippable_p (type, stmt))
3011 rhs1 = rhs;
3012 else
3013 {
3014 rhs1 = gimple_assign_rhs1 (stmt);
3015
3016 if (TREE_CODE (rhs1) == INTEGER_CST)
3017 {
3018 *new_rhs_out = rhs1;
3019 *type_out = NULL;
3020 return true;
3021 }
3022 }
3023 }
3024 else
3025 rhs1 = rhs;
3026
3027 type1 = TREE_TYPE (rhs1);
3028
3029 if (TREE_CODE (type1) != TREE_CODE (type)
3030 || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
3031 return false;
3032
3033 *new_rhs_out = rhs1;
3034 *type_out = type1;
3035 return true;
3036 }
3037
3038 if (TREE_CODE (rhs) == INTEGER_CST)
3039 {
3040 *new_rhs_out = rhs;
3041 *type_out = NULL;
3042 return true;
3043 }
3044
3045 return false;
3046 }
3047
3048 /* Return true if STMT performs a widening multiplication, assuming the
3049 output type is TYPE. If so, store the unwidened types of the operands
3050 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
3051 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
3052 and *TYPE2_OUT would give the operands of the multiplication. */
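
/* For instance (a sketch), with 32-bit int and 64-bit long long,

     long long r = (long long) a * (long long) b;

   with int a, b qualifies: both unwidened operand types are int,
   half the width of the result, so *TYPE1_OUT and *TYPE2_OUT are
   set to int and *RHS1_OUT and *RHS2_OUT to a and b. */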
3053
3054 static bool
3055 is_widening_mult_p (gimple *stmt,
3056 tree *type1_out, tree *rhs1_out,
3057 tree *type2_out, tree *rhs2_out)
3058 {
3059 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
3060
3061 if (TREE_CODE (type) != INTEGER_TYPE
3062 && TREE_CODE (type) != FIXED_POINT_TYPE)
3063 return false;
3064
3065 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
3066 rhs1_out))
3067 return false;
3068
3069 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
3070 rhs2_out))
3071 return false;
3072
3073 if (*type1_out == NULL)
3074 {
3075 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
3076 return false;
3077 *type1_out = *type2_out;
3078 }
3079
3080 if (*type2_out == NULL)
3081 {
3082 if (!int_fits_type_p (*rhs2_out, *type1_out))
3083 return false;
3084 *type2_out = *type1_out;
3085 }
3086
3087 /* Ensure that the larger of the two operands comes first. */
3088 if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
3089 {
3090 std::swap (*type1_out, *type2_out);
3091 std::swap (*rhs1_out, *rhs2_out);
3092 }
3093
3094 return true;
3095 }
3096
3097 /* Process a single gimple statement STMT, which has a MULT_EXPR as
3098 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
3099 value is true iff we converted the statement. */
3100
3101 static bool
3102 convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
3103 {
3104 tree lhs, rhs1, rhs2, type, type1, type2;
3105 enum insn_code handler;
3106 machine_mode to_mode, from_mode, actual_mode;
3107 optab op;
3108 int actual_precision;
3109 location_t loc = gimple_location (stmt);
3110 bool from_unsigned1, from_unsigned2;
3111
3112 lhs = gimple_assign_lhs (stmt);
3113 type = TREE_TYPE (lhs);
3114 if (TREE_CODE (type) != INTEGER_TYPE)
3115 return false;
3116
3117 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
3118 return false;
3119
3120 to_mode = TYPE_MODE (type);
3121 from_mode = TYPE_MODE (type1);
3122 from_unsigned1 = TYPE_UNSIGNED (type1);
3123 from_unsigned2 = TYPE_UNSIGNED (type2);
3124
3125 if (from_unsigned1 && from_unsigned2)
3126 op = umul_widen_optab;
3127 else if (!from_unsigned1 && !from_unsigned2)
3128 op = smul_widen_optab;
3129 else
3130 op = usmul_widen_optab;
3131
3132 handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
3133 0, &actual_mode);
3134
3135 if (handler == CODE_FOR_nothing)
3136 {
3137 if (op != smul_widen_optab)
3138 {
3139 /* We can use a signed multiply with unsigned types as long as
3140 there is a wider mode to use, or it is the smaller of the two
3141 types that is unsigned. Note that type1 >= type2, always. */
3142 if ((TYPE_UNSIGNED (type1)
3143 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
3144 || (TYPE_UNSIGNED (type2)
3145 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
3146 {
3147 from_mode = GET_MODE_WIDER_MODE (from_mode);
3148 if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
3149 return false;
3150 }
3151
3152 op = smul_widen_optab;
3153 handler = find_widening_optab_handler_and_mode (op, to_mode,
3154 from_mode, 0,
3155 &actual_mode);
3156
3157 if (handler == CODE_FOR_nothing)
3158 return false;
3159
3160 from_unsigned1 = from_unsigned2 = false;
3161 }
3162 else
3163 return false;
3164 }
3165
3166 /* Ensure that the inputs to the handler are in the correct precision
3167 for the opcode. This will be the full mode size. */
3168 actual_precision = GET_MODE_PRECISION (actual_mode);
3169 if (2 * actual_precision > TYPE_PRECISION (type))
3170 return false;
3171 if (actual_precision != TYPE_PRECISION (type1)
3172 || from_unsigned1 != TYPE_UNSIGNED (type1))
3173 rhs1 = build_and_insert_cast (gsi, loc,
3174 build_nonstandard_integer_type
3175 (actual_precision, from_unsigned1), rhs1);
3176 if (actual_precision != TYPE_PRECISION (type2)
3177 || from_unsigned2 != TYPE_UNSIGNED (type2))
3178 rhs2 = build_and_insert_cast (gsi, loc,
3179 build_nonstandard_integer_type
3180 (actual_precision, from_unsigned2), rhs2);
3181
3182 /* Handle constants. */
3183 if (TREE_CODE (rhs1) == INTEGER_CST)
3184 rhs1 = fold_convert (type1, rhs1);
3185 if (TREE_CODE (rhs2) == INTEGER_CST)
3186 rhs2 = fold_convert (type2, rhs2);
3187
3188 gimple_assign_set_rhs1 (stmt, rhs1);
3189 gimple_assign_set_rhs2 (stmt, rhs2);
3190 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
3191 update_stmt (stmt);
3192 widen_mul_stats.widen_mults_inserted++;
3193 return true;
3194 }
3195
3196 /* Process a single gimple statement STMT, which is found at the
3197 iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
3198 rhs (given by CODE), and try to convert it into a
3199 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
3200 is true iff we converted the statement. */
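
/* For example (a sketch), with int a, b and long long c,

     r = c + (long long) a * (long long) b;

   can become r = WIDEN_MULT_PLUS_EXPR <a, b, c> when the target has
   a widening multiply-accumulate pattern for this mode and
   signedness. */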
3201
3202 static bool
3203 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
3204 enum tree_code code)
3205 {
3206 gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
3207 gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
3208 tree type, type1, type2, optype;
3209 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
3210 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
3211 optab this_optab;
3212 enum tree_code wmult_code;
3213 enum insn_code handler;
3214 machine_mode to_mode, from_mode, actual_mode;
3215 location_t loc = gimple_location (stmt);
3216 int actual_precision;
3217 bool from_unsigned1, from_unsigned2;
3218
3219 lhs = gimple_assign_lhs (stmt);
3220 type = TREE_TYPE (lhs);
3221 if (TREE_CODE (type) != INTEGER_TYPE
3222 && TREE_CODE (type) != FIXED_POINT_TYPE)
3223 return false;
3224
3225 if (code == MINUS_EXPR)
3226 wmult_code = WIDEN_MULT_MINUS_EXPR;
3227 else
3228 wmult_code = WIDEN_MULT_PLUS_EXPR;
3229
3230 rhs1 = gimple_assign_rhs1 (stmt);
3231 rhs2 = gimple_assign_rhs2 (stmt);
3232
3233 if (TREE_CODE (rhs1) == SSA_NAME)
3234 {
3235 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
3236 if (is_gimple_assign (rhs1_stmt))
3237 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
3238 }
3239
3240 if (TREE_CODE (rhs2) == SSA_NAME)
3241 {
3242 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
3243 if (is_gimple_assign (rhs2_stmt))
3244 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
3245 }
3246
3247 /* Allow for one conversion statement between the multiply
3248 and addition/subtraction statement. If there is more than
3249 one conversion then we assume they would invalidate this
3250 transformation. If that's not the case then they should have
3251 been folded before now. */
3252 if (CONVERT_EXPR_CODE_P (rhs1_code))
3253 {
3254 conv1_stmt = rhs1_stmt;
3255 rhs1 = gimple_assign_rhs1 (rhs1_stmt);
3256 if (TREE_CODE (rhs1) == SSA_NAME)
3257 {
3258 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
3259 if (is_gimple_assign (rhs1_stmt))
3260 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
3261 }
3262 else
3263 return false;
3264 }
3265 if (CONVERT_EXPR_CODE_P (rhs2_code))
3266 {
3267 conv2_stmt = rhs2_stmt;
3268 rhs2 = gimple_assign_rhs1 (rhs2_stmt);
3269 if (TREE_CODE (rhs2) == SSA_NAME)
3270 {
3271 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
3272 if (is_gimple_assign (rhs2_stmt))
3273 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
3274 }
3275 else
3276 return false;
3277 }
3278
3279 /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
3280 is_widening_mult_p, but we still need the rhs values it returns.
3281
3282 It might also appear that it would be sufficient to use the existing
3283 operands of the widening multiply, but that would limit the choice of
3284 multiply-and-accumulate instructions.
3285
3286 If the widened-multiplication result has more than one use, it is
3287 probably wiser not to do the conversion. */
3288 if (code == PLUS_EXPR
3289 && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
3290 {
3291 if (!has_single_use (rhs1)
3292 || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
3293 &type2, &mult_rhs2))
3294 return false;
3295 add_rhs = rhs2;
3296 conv_stmt = conv1_stmt;
3297 }
3298 else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
3299 {
3300 if (!has_single_use (rhs2)
3301 || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
3302 &type2, &mult_rhs2))
3303 return false;
3304 add_rhs = rhs1;
3305 conv_stmt = conv2_stmt;
3306 }
3307 else
3308 return false;
3309
3310 to_mode = TYPE_MODE (type);
3311 from_mode = TYPE_MODE (type1);
3312 from_unsigned1 = TYPE_UNSIGNED (type1);
3313 from_unsigned2 = TYPE_UNSIGNED (type2);
3314 optype = type1;
3315
3316 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
3317 if (from_unsigned1 != from_unsigned2)
3318 {
3319 if (!INTEGRAL_TYPE_P (type))
3320 return false;
3321 /* We can use a signed multiply with unsigned types as long as
3322 there is a wider mode to use, or it is the smaller of the two
3323 types that is unsigned. Note that type1 >= type2, always. */
3324 if ((from_unsigned1
3325 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
3326 || (from_unsigned2
3327 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
3328 {
3329 from_mode = GET_MODE_WIDER_MODE (from_mode);
3330 if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
3331 return false;
3332 }
3333
3334 from_unsigned1 = from_unsigned2 = false;
3335 optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
3336 false);
3337 }
3338
3339 /* If there was a conversion between the multiply and addition
3340 then we need to make sure it fits a multiply-and-accumulate.
3341 There should be a single mode change which does not change the
3342 value. */
3343 if (conv_stmt)
3344 {
3345 /* We use the original, unmodified data types for this. */
3346 tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
3347 tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
3348 int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
3349 bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
3350
3351 if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
3352 {
3353 /* Conversion is a truncate. */
3354 if (TYPE_PRECISION (to_type) < data_size)
3355 return false;
3356 }
3357 else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
3358 {
3359 /* Conversion is an extend. Check it's the right sort. */
3360 if (TYPE_UNSIGNED (from_type) != is_unsigned
3361 && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
3362 return false;
3363 }
3364 /* else convert is a no-op for our purposes. */
3365 }
3366
3367 /* Verify that the machine can perform a widening multiply
3368 accumulate in this mode/signedness combination, otherwise
3369 this transformation is likely to pessimize code. */
3370 this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
3371 handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
3372 from_mode, 0, &actual_mode);
3373
3374 if (handler == CODE_FOR_nothing)
3375 return false;
3376
3377 /* Ensure that the inputs to the handler are in the correct precision
3378 for the opcode. This will be the full mode size. */
3379 actual_precision = GET_MODE_PRECISION (actual_mode);
3380 if (actual_precision != TYPE_PRECISION (type1)
3381 || from_unsigned1 != TYPE_UNSIGNED (type1))
3382 mult_rhs1 = build_and_insert_cast (gsi, loc,
3383 build_nonstandard_integer_type
3384 (actual_precision, from_unsigned1),
3385 mult_rhs1);
3386 if (actual_precision != TYPE_PRECISION (type2)
3387 || from_unsigned2 != TYPE_UNSIGNED (type2))
3388 mult_rhs2 = build_and_insert_cast (gsi, loc,
3389 build_nonstandard_integer_type
3390 (actual_precision, from_unsigned2),
3391 mult_rhs2);
3392
3393 if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
3394 add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
3395
3396 /* Handle constants. */
3397 if (TREE_CODE (mult_rhs1) == INTEGER_CST)
3398 mult_rhs1 = fold_convert (type1, mult_rhs1);
3399 if (TREE_CODE (mult_rhs2) == INTEGER_CST)
3400 mult_rhs2 = fold_convert (type2, mult_rhs2);
3401
3402 gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
3403 add_rhs);
3404 update_stmt (gsi_stmt (*gsi));
3405 widen_mul_stats.maccs_inserted++;
3406 return true;
3407 }
3408
3409 /* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
3410 with uses in additions and subtractions to form fused multiply-add
3411 operations. Returns true if successful and MUL_STMT should be removed. */
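
/* For instance (a sketch), with -ffp-contract=fast,

     tmp = a * b;
     d = tmp + c;

   becomes d = FMA_EXPR <a, b, c> provided tmp has no other uses, and
   -(a*b) - c likewise forms FMA_EXPR <-a, b, -c> through the negate
   handling below. */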
3412
3413 static bool
3414 convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2)
3415 {
3416 tree mul_result = gimple_get_lhs (mul_stmt);
3417 tree type = TREE_TYPE (mul_result);
3418 gimple *use_stmt, *neguse_stmt;
3419 gassign *fma_stmt;
3420 use_operand_p use_p;
3421 imm_use_iterator imm_iter;
3422
3423 if (FLOAT_TYPE_P (type)
3424 && flag_fp_contract_mode == FP_CONTRACT_OFF)
3425 return false;
3426
3427 /* We don't want to do bitfield reduction ops. */
3428 if (INTEGRAL_TYPE_P (type)
3429 && (TYPE_PRECISION (type)
3430 != GET_MODE_PRECISION (TYPE_MODE (type))))
3431 return false;
3432
3433 /* If the target doesn't support it, don't generate it. We assume that
3434 if fma isn't available then fms, fnma or fnms are not either. */
3435 if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
3436 return false;
3437
3438 /* If the multiplication has zero uses, it is kept around probably because
3439 of -fnon-call-exceptions. Don't optimize it away in that case,
3440 that is DCE's job. */
3441 if (has_zero_uses (mul_result))
3442 return false;
3443
3444 /* Make sure that the multiplication statement becomes dead after
3445 the transformation, i.e. that all uses are transformed to FMAs.
3446 This means we assume that an FMA operation has the same cost
3447 as an addition. */
3448 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
3449 {
3450 enum tree_code use_code;
3451 tree result = mul_result;
3452 bool negate_p = false;
3453
3454 use_stmt = USE_STMT (use_p);
3455
3456 if (is_gimple_debug (use_stmt))
3457 continue;
3458
3459 /* For now restrict this operation to single basic blocks. In theory
3460 we would want to support sinking the multiplication in
3461 m = a*b;
3462 if ()
3463 ma = m + c;
3464 else
3465 d = m;
3466 to form a fma in the then block and sink the multiplication to the
3467 else block. */
3468 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3469 return false;
3470
3471 if (!is_gimple_assign (use_stmt))
3472 return false;
3473
3474 use_code = gimple_assign_rhs_code (use_stmt);
3475
3476 /* A negate on the multiplication leads to FNMA. */
3477 if (use_code == NEGATE_EXPR)
3478 {
3479 ssa_op_iter iter;
3480 use_operand_p usep;
3481
3482 result = gimple_assign_lhs (use_stmt);
3483
3484 /* Make sure the negate statement becomes dead with this
3485 single transformation. */
3486 if (!single_imm_use (gimple_assign_lhs (use_stmt),
3487 &use_p, &neguse_stmt))
3488 return false;
3489
3490 /* Make sure the multiplication isn't also used on that stmt. */
3491 FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
3492 if (USE_FROM_PTR (usep) == mul_result)
3493 return false;
3494
3495 /* Re-validate. */
3496 use_stmt = neguse_stmt;
3497 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3498 return false;
3499 if (!is_gimple_assign (use_stmt))
3500 return false;
3501
3502 use_code = gimple_assign_rhs_code (use_stmt);
3503 negate_p = true;
3504 }
3505
3506 switch (use_code)
3507 {
3508 case MINUS_EXPR:
3509 if (gimple_assign_rhs2 (use_stmt) == result)
3510 negate_p = !negate_p;
3511 break;
3512 case PLUS_EXPR:
3513 break;
3514 default:
3515 /* FMA can only be formed from PLUS and MINUS. */
3516 return false;
3517 }
3518
3519 /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
3520 by a MULT_EXPR that we'll visit later, we might be able to
3521 get a more profitable match with fnma.
3522 OTOH, if we don't, a negate / fma pair has likely lower latency
3523 than a mult / subtract pair. */
3524 if (use_code == MINUS_EXPR && !negate_p
3525 && gimple_assign_rhs1 (use_stmt) == result
3526 && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
3527 && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
3528 {
3529 tree rhs2 = gimple_assign_rhs2 (use_stmt);
3530
3531 if (TREE_CODE (rhs2) == SSA_NAME)
3532 {
3533 gimple *stmt2 = SSA_NAME_DEF_STMT (rhs2);
3534 if (has_single_use (rhs2)
3535 && is_gimple_assign (stmt2)
3536 && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
3537 return false;
3538 }
3539 }
3540
3541 /* We can't handle a * b + a * b. */
3542 if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
3543 return false;
3544
3545 /* While it is possible to validate whether or not the exact form
3546 that we've recognized is available in the backend, the assumption
3547 is that the transformation is never a loss. For instance, suppose
3548 the target only has the plain FMA pattern available. Consider
3549 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
3550 is still two operations. Consider -(a*b)-c -> fma(-a,b,-c): we
3551 still have 3 operations, but in the FMA form the two NEGs are
3552 independent and could be run in parallel. */
3553 }
3554
3555 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
3556 {
3557 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
3558 enum tree_code use_code;
3559 tree addop, mulop1 = op1, result = mul_result;
3560 bool negate_p = false;
3561
3562 if (is_gimple_debug (use_stmt))
3563 continue;
3564
3565 use_code = gimple_assign_rhs_code (use_stmt);
3566 if (use_code == NEGATE_EXPR)
3567 {
3568 result = gimple_assign_lhs (use_stmt);
3569 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
3570 gsi_remove (&gsi, true);
3571 release_defs (use_stmt);
3572
3573 use_stmt = neguse_stmt;
3574 gsi = gsi_for_stmt (use_stmt);
3575 use_code = gimple_assign_rhs_code (use_stmt);
3576 negate_p = true;
3577 }
3578
3579 if (gimple_assign_rhs1 (use_stmt) == result)
3580 {
3581 addop = gimple_assign_rhs2 (use_stmt);
3582 /* a * b - c -> a * b + (-c) */
3583 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3584 addop = force_gimple_operand_gsi (&gsi,
3585 build1 (NEGATE_EXPR,
3586 type, addop),
3587 true, NULL_TREE, true,
3588 GSI_SAME_STMT);
3589 }
3590 else
3591 {
3592 addop = gimple_assign_rhs1 (use_stmt);
3593 /* a - b * c -> (-b) * c + a */
3594 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3595 negate_p = !negate_p;
3596 }
3597
3598 if (negate_p)
3599 mulop1 = force_gimple_operand_gsi (&gsi,
3600 build1 (NEGATE_EXPR,
3601 type, mulop1),
3602 true, NULL_TREE, true,
3603 GSI_SAME_STMT);
3604
3605 fma_stmt = gimple_build_assign (gimple_assign_lhs (use_stmt),
3606 FMA_EXPR, mulop1, op2, addop);
3607 gsi_replace (&gsi, fma_stmt, true);
3608 widen_mul_stats.fmas_inserted++;
3609 }
3610
3611 return true;
3612 }
3613
3614
3615 /* Helper function of match_uaddsub_overflow. Return 1
3616 if USE_STMT is an unsigned overflow check ovf != 0 for
3617 STMT, -1 if USE_STMT is an unsigned overflow check ovf == 0,
3618 and 0 otherwise. */
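/* For example (illustrative): given STMT r = a + b, a use
   if (r < a) yields 1, while a use t = (a <= r) yields -1.  */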
3619
3620 static int
3621 uaddsub_overflow_check_p (gimple *stmt, gimple *use_stmt)
3622 {
3623 enum tree_code ccode = ERROR_MARK;
3624 tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
3625 if (gimple_code (use_stmt) == GIMPLE_COND)
3626 {
3627 ccode = gimple_cond_code (use_stmt);
3628 crhs1 = gimple_cond_lhs (use_stmt);
3629 crhs2 = gimple_cond_rhs (use_stmt);
3630 }
3631 else if (is_gimple_assign (use_stmt))
3632 {
3633 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3634 {
3635 ccode = gimple_assign_rhs_code (use_stmt);
3636 crhs1 = gimple_assign_rhs1 (use_stmt);
3637 crhs2 = gimple_assign_rhs2 (use_stmt);
3638 }
3639 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
3640 {
3641 tree cond = gimple_assign_rhs1 (use_stmt);
3642 if (COMPARISON_CLASS_P (cond))
3643 {
3644 ccode = TREE_CODE (cond);
3645 crhs1 = TREE_OPERAND (cond, 0);
3646 crhs2 = TREE_OPERAND (cond, 1);
3647 }
3648 else
3649 return 0;
3650 }
3651 else
3652 return 0;
3653 }
3654 else
3655 return 0;
3656
3657 if (TREE_CODE_CLASS (ccode) != tcc_comparison)
3658 return 0;
3659
3660 enum tree_code code = gimple_assign_rhs_code (stmt);
3661 tree lhs = gimple_assign_lhs (stmt);
3662 tree rhs1 = gimple_assign_rhs1 (stmt);
3663 tree rhs2 = gimple_assign_rhs2 (stmt);
3664
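/* The patterns below rely on unsigned wraparound: for r = a - b the
   subtraction wrapped iff b > a, which holds iff the wrapped result
   r exceeds a; for r = a + b the addition wrapped iff r < a,
   equivalently iff r < b.  */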
3665 switch (ccode)
3666 {
3667 case GT_EXPR:
3668 case LE_EXPR:
3669 /* r = a - b; r > a or r <= a
3670 r = a + b; a > r or a <= r or b > r or b <= r. */
3671 if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
3672 || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
3673 && crhs2 == lhs))
3674 return ccode == GT_EXPR ? 1 : -1;
3675 break;
3676 case LT_EXPR:
3677 case GE_EXPR:
3678 /* r = a - b; a < r or a >= r
3679 r = a + b; r < a or r >= a or r < b or r >= b. */
3680 if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
3681 || (code == PLUS_EXPR && crhs1 == lhs
3682 && (crhs2 == rhs1 || crhs2 == rhs2)))
3683 return ccode == LT_EXPR ? 1 : -1;
3684 break;
3685 default:
3686 break;
3687 }
3688 return 0;
3689 }
3690
3691 /* Recognize, for unsigned x,
3692 x = y - z;
3693 if (x > y)
3694 where there are other uses of x, and replace it with
3695 _7 = SUB_OVERFLOW (y, z);
3696 x = REALPART_EXPR <_7>;
3697 _8 = IMAGPART_EXPR <_7>;
3698 if (_8)
3699 and similarly for addition. */
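/* As a C-level illustration (hypothetical source, not from this file):

   unsigned x = y - z;
   use (x);           -- some other use of x
   if (x > y)         -- the wraparound check
   handle_overflow ();

   computes x and the overflow flag with a single IFN_SUB_OVERFLOW
   call, provided the target implements the usubv4 pattern.  */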
3700
3701 static bool
3702 match_uaddsub_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
3703 enum tree_code code)
3704 {
3705 tree lhs = gimple_assign_lhs (stmt);
3706 tree type = TREE_TYPE (lhs);
3707 use_operand_p use_p;
3708 imm_use_iterator iter;
3709 bool use_seen = false;
3710 bool ovf_use_seen = false;
3711 gimple *use_stmt;
3712
3713 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
3714 if (!INTEGRAL_TYPE_P (type)
3715 || !TYPE_UNSIGNED (type)
3716 || has_zero_uses (lhs)
3717 || has_single_use (lhs)
3718 || optab_handler (code == PLUS_EXPR ? uaddv4_optab : usubv4_optab,
3719 TYPE_MODE (type)) == CODE_FOR_nothing)
3720 return false;
3721
3722 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3723 {
3724 use_stmt = USE_STMT (use_p);
3725 if (is_gimple_debug (use_stmt))
3726 continue;
3727
3728 if (uaddsub_overflow_check_p (stmt, use_stmt))
3729 ovf_use_seen = true;
3730 else
3731 use_seen = true;
3732 if (ovf_use_seen && use_seen)
3733 break;
3734 }
3735
3736 if (!ovf_use_seen || !use_seen)
3737 return false;
3738
3739 tree ctype = build_complex_type (type);
3740 tree rhs1 = gimple_assign_rhs1 (stmt);
3741 tree rhs2 = gimple_assign_rhs2 (stmt);
3742 gcall *g = gimple_build_call_internal (code == PLUS_EXPR
3743 ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW,
3744 2, rhs1, rhs2);
3745 tree ctmp = make_ssa_name (ctype);
3746 gimple_call_set_lhs (g, ctmp);
3747 gsi_insert_before (gsi, g, GSI_SAME_STMT);
3748 gassign *g2 = gimple_build_assign (lhs, REALPART_EXPR,
3749 build1 (REALPART_EXPR, type, ctmp));
3750 gsi_replace (gsi, g2, true);
3751 tree ovf = make_ssa_name (type);
3752 g2 = gimple_build_assign (ovf, IMAGPART_EXPR,
3753 build1 (IMAGPART_EXPR, type, ctmp));
3754 gsi_insert_after (gsi, g2, GSI_NEW_STMT);
3755
3756 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3757 {
3758 if (is_gimple_debug (use_stmt))
3759 continue;
3760
3761 int ovf_use = uaddsub_overflow_check_p (stmt, use_stmt);
3762 if (ovf_use == 0)
3763 continue;
3764 if (gimple_code (use_stmt) == GIMPLE_COND)
3765 {
3766 gcond *cond_stmt = as_a <gcond *> (use_stmt);
3767 gimple_cond_set_lhs (cond_stmt, ovf);
3768 gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0));
3769 gimple_cond_set_code (cond_stmt, ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3770 }
3771 else
3772 {
3773 gcc_checking_assert (is_gimple_assign (use_stmt));
3774 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3775 {
3776 gimple_assign_set_rhs1 (use_stmt, ovf);
3777 gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0));
3778 gimple_assign_set_rhs_code (use_stmt,
3779 ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3780 }
3781 else
3782 {
3783 gcc_checking_assert (gimple_assign_rhs_code (use_stmt)
3784 == COND_EXPR);
3785 tree cond = build2 (ovf_use == 1 ? NE_EXPR : EQ_EXPR,
3786 boolean_type_node, ovf,
3787 build_int_cst (type, 0));
3788 gimple_assign_set_rhs1 (use_stmt, cond);
3789 }
3790 }
3791 update_stmt (use_stmt);
3792 }
3793 return true;
3794 }
3795
3796
3797 /* Find integer multiplications where the operands are extended from
3798 smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
3799 where appropriate. */
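/* For example (illustrative):

   int a, b;
   long long c = (long long) a * (long long) b;

   can become a single WIDEN_MULT_EXPR (a w* b) instead of two sign
   extensions followed by a full 64x64 multiplication.  */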
3800
3801 namespace {
3802
3803 const pass_data pass_data_optimize_widening_mul =
3804 {
3805 GIMPLE_PASS, /* type */
3806 "widening_mul", /* name */
3807 OPTGROUP_NONE, /* optinfo_flags */
3808 TV_NONE, /* tv_id */
3809 PROP_ssa, /* properties_required */
3810 0, /* properties_provided */
3811 0, /* properties_destroyed */
3812 0, /* todo_flags_start */
3813 TODO_update_ssa, /* todo_flags_finish */
3814 };
3815
3816 class pass_optimize_widening_mul : public gimple_opt_pass
3817 {
3818 public:
3819 pass_optimize_widening_mul (gcc::context *ctxt)
3820 : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
3821 {}
3822
3823 /* opt_pass methods: */
3824 virtual bool gate (function *)
3825 {
3826 return flag_expensive_optimizations && optimize;
3827 }
3828
3829 virtual unsigned int execute (function *);
3830
3831 }; // class pass_optimize_widening_mul
3832
3833 unsigned int
3834 pass_optimize_widening_mul::execute (function *fun)
3835 {
3836 basic_block bb;
3837 bool cfg_changed = false;
3838
3839 memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
3840
3841 FOR_EACH_BB_FN (bb, fun)
3842 {
3843 gimple_stmt_iterator gsi;
3844
3845 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
3846 {
3847 gimple *stmt = gsi_stmt (gsi);
3848 enum tree_code code;
3849
3850 if (is_gimple_assign (stmt))
3851 {
3852 code = gimple_assign_rhs_code (stmt);
3853 switch (code)
3854 {
3855 case MULT_EXPR:
3856 if (!convert_mult_to_widen (stmt, &gsi)
3857 && convert_mult_to_fma (stmt,
3858 gimple_assign_rhs1 (stmt),
3859 gimple_assign_rhs2 (stmt)))
3860 {
3861 gsi_remove (&gsi, true);
3862 release_defs (stmt);
3863 continue;
3864 }
3865 break;
3866
3867 case PLUS_EXPR:
3868 case MINUS_EXPR:
3869 if (!convert_plusminus_to_widen (&gsi, stmt, code))
3870 match_uaddsub_overflow (&gsi, stmt, code);
3871 break;
3872
3873 default:;
3874 }
3875 }
3876 else if (is_gimple_call (stmt)
3877 && gimple_call_lhs (stmt))
3878 {
3879 tree fndecl = gimple_call_fndecl (stmt);
3880 if (fndecl
3881 && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
3882 {
3883 switch (DECL_FUNCTION_CODE (fndecl))
3884 {
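/* pow (x, 2.0) is simply x*x, so it can feed FMA formation
   like any other multiplication; e.g. (illustrative)
   t = pow (x, 2.0); r = t + c; becomes r = fma (x, x, c)
   and the call is removed.  */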
3885 case BUILT_IN_POWF:
3886 case BUILT_IN_POW:
3887 case BUILT_IN_POWL:
3888 if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
3889 && real_equal
3890 (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
3891 &dconst2)
3892 && convert_mult_to_fma (stmt,
3893 gimple_call_arg (stmt, 0),
3894 gimple_call_arg (stmt, 0)))
3895 {
3896 unlink_stmt_vdef (stmt);
3897 if (gsi_remove (&gsi, true)
3898 && gimple_purge_dead_eh_edges (bb))
3899 cfg_changed = true;
3900 release_defs (stmt);
3901 continue;
3902 }
3903 break;
3904
3905 default:;
3906 }
3907 }
3908 }
3909 gsi_next (&gsi);
3910 }
3911 }
3912
3913 statistics_counter_event (fun, "widening multiplications inserted",
3914 widen_mul_stats.widen_mults_inserted);
3915 statistics_counter_event (fun, "widening maccs inserted",
3916 widen_mul_stats.maccs_inserted);
3917 statistics_counter_event (fun, "fused multiply-adds inserted",
3918 widen_mul_stats.fmas_inserted);
3919
3920 return cfg_changed ? TODO_cleanup_cfg : 0;
3921 }
3922
3923 } // anon namespace
3924
3925 gimple_opt_pass *
3926 make_pass_optimize_widening_mul (gcc::context *ctxt)
3927 {
3928 return new pass_optimize_widening_mul (ctxt);
3929 }