1 /* Global, SSA-based optimizations using mathematical identities.
2 Copyright (C) 2005-2016 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* Currently, the only mini-pass in this file tries to CSE reciprocal
21 operations. These are common in sequences such as this one:
22
23 modulus = sqrt(x*x + y*y + z*z);
24 x = x / modulus;
25 y = y / modulus;
26 z = z / modulus;
27
28 that can be optimized to
29
30 modulus = sqrt(x*x + y*y + z*z);
31 rmodulus = 1.0 / modulus;
32 x = x * rmodulus;
33 y = y * rmodulus;
34 z = z * rmodulus;
35
36 We do this for loop invariant divisors, and with this pass whenever
37 we notice that a division has the same divisor multiple times.
38
39 Of course, like in PRE, we don't insert a division if a dominator
40 already has one. However, this cannot be done as an extension of
41 PRE for several reasons.
42
43     First of all, experiments showed that the
44 transformation is not always useful if there are only two divisions
45 by the same divisor. This is probably because modern processors
46 can pipeline the divisions; on older, in-order processors it should
47 still be effective to optimize two divisions by the same number.
48 We make this a param, and it shall be called N in the remainder of
49 this comment.
50
51 Second, if trapping math is active, we have less freedom on where
52 to insert divisions: we can only do so in basic blocks that already
53 contain one. (If divisions don't trap, instead, we can insert
54 divisions elsewhere, which will be in blocks that are common dominators
55 of those that have the division).
56
57 We really don't want to compute the reciprocal unless a division will
58 be found. To do this, we won't insert the division in a basic block
59 that has less than N divisions *post-dominating* it.
60
61     The algorithm constructs a subset of the dominator tree, holding the
62     blocks containing the divisions and their common dominators,
63     and walks it twice.  The first walk is in post-order, and it annotates
64 each block with the number of divisions that post-dominate it: this
65 gives information on where divisions can be inserted profitably.
66 The second walk is in pre-order, and it inserts divisions as explained
67 above, and replaces divisions by multiplications.
68
69 In the best case, the cost of the pass is O(n_statements). In the
70 worst-case, the cost is due to creating the dominator tree subset,
71 with a cost of O(n_basic_blocks ^ 2); however this can only happen
72 for n_statements / n_basic_blocks statements. So, the amortized cost
73 of creating the dominator tree subset is O(n_basic_blocks) and the
74 worst-case cost of the pass is O(n_statements * n_basic_blocks).
75
76 More practically, the cost will be small because there are few
77 divisions, and they tend to be in the same basic block, so insert_bb
78 is called very few times.
79
80 If we did this using domwalk.c, an efficient implementation would have
81 to work on all the variables in a single pass, because we could not
82 work on just a subset of the dominator tree, as we do now, and the
83 cost would also be something like O(n_statements * n_basic_blocks).
84 The data structures would be more complex in order to work on all the
85 variables in a single pass. */
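
/* An illustrative sketch (not a dump from the compiler): for the example
   at the top of this comment, the pass roughly produces

     reciptmp_5 = 1.0 / modulus_4;
     x_6 = x_1 * reciptmp_5;
     y_7 = y_2 * reciptmp_5;
     z_8 = z_3 * reciptmp_5;

   where "reciptmp" is the temporary created by insert_reciprocals below;
   the SSA version numbers are invented for the illustration.  */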
86
87 #include "config.h"
88 #include "system.h"
89 #include "coretypes.h"
90 #include "backend.h"
91 #include "target.h"
92 #include "rtl.h"
93 #include "tree.h"
94 #include "gimple.h"
95 #include "predict.h"
96 #include "alloc-pool.h"
97 #include "tree-pass.h"
98 #include "ssa.h"
99 #include "optabs-tree.h"
100 #include "gimple-pretty-print.h"
101 #include "alias.h"
102 #include "fold-const.h"
103 #include "gimple-fold.h"
104 #include "gimple-iterator.h"
105 #include "gimplify.h"
106 #include "gimplify-me.h"
107 #include "stor-layout.h"
108 #include "tree-cfg.h"
109 #include "tree-dfa.h"
110 #include "tree-ssa.h"
111 #include "builtins.h"
112 #include "params.h"
113 #include "internal-fn.h"
114 #include "case-cfn-macros.h"
115 #include "optabs-libfuncs.h"
116 #include "tree-eh.h"
117 #include "targhooks.h"
118
119 /* This structure represents one basic block that either computes a
120    division, or is a common dominator for basic blocks that compute a
121 division. */
122 struct occurrence {
123 /* The basic block represented by this structure. */
124 basic_block bb;
125
126 /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
127 inserted in BB. */
128 tree recip_def;
129
130 /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
131 was inserted in BB. */
132 gimple *recip_def_stmt;
133
134 /* Pointer to a list of "struct occurrence"s for blocks dominated
135 by BB. */
136 struct occurrence *children;
137
138   /* Pointer to the next "struct occurrence" in the list of blocks
139 sharing a common dominator. */
140 struct occurrence *next;
141
142 /* The number of divisions that are in BB before compute_merit. The
143 number of divisions that are in BB or post-dominate it after
144 compute_merit. */
145 int num_divisions;
146
147 /* True if the basic block has a division, false if it is a common
148 dominator for basic blocks that do. If it is false and trapping
149 math is active, BB is not a candidate for inserting a reciprocal. */
150 bool bb_has_division;
151 };
152
153 static struct
154 {
155 /* Number of 1.0/X ops inserted. */
156 int rdivs_inserted;
157
158 /* Number of 1.0/FUNC ops inserted. */
159 int rfuncs_inserted;
160 } reciprocal_stats;
161
162 static struct
163 {
164 /* Number of cexpi calls inserted. */
165 int inserted;
166 } sincos_stats;
167
168 static struct
169 {
170 /* Number of hand-written 16-bit nop / bswaps found. */
171 int found_16bit;
172
173 /* Number of hand-written 32-bit nop / bswaps found. */
174 int found_32bit;
175
176 /* Number of hand-written 64-bit nop / bswaps found. */
177 int found_64bit;
178 } nop_stats, bswap_stats;
179
180 static struct
181 {
182 /* Number of widening multiplication ops inserted. */
183 int widen_mults_inserted;
184
185 /* Number of integer multiply-and-accumulate ops inserted. */
186 int maccs_inserted;
187
188 /* Number of fp fused multiply-add ops inserted. */
189 int fmas_inserted;
190
191 /* Number of divmod calls inserted. */
192 int divmod_calls_inserted;
193 } widen_mul_stats;
194
195 /* The instance of "struct occurrence" representing the highest
196 interesting block in the dominator tree. */
197 static struct occurrence *occ_head;
198
199 /* Allocation pool for getting instances of "struct occurrence". */
200 static object_allocator<occurrence> *occ_pool;
201
202
203
204 /* Allocate and return a new struct occurrence for basic block BB,
205 whose children list is headed by CHILDREN. */
206 static struct occurrence *
207 occ_new (basic_block bb, struct occurrence *children)
208 {
209 struct occurrence *occ;
210
211 bb->aux = occ = occ_pool->allocate ();
212 memset (occ, 0, sizeof (struct occurrence));
213
214 occ->bb = bb;
215 occ->children = children;
216 return occ;
217 }
218
219
220 /* Insert NEW_OCC into our subset of the dominator tree. P_HEAD points to a
221 list of "struct occurrence"s, one per basic block, having IDOM as
222 their common dominator.
223
224 We try to insert NEW_OCC as deep as possible in the tree, and we also
225 insert any other block that is a common dominator for BB and one
226 block already in the tree. */
227
228 static void
229 insert_bb (struct occurrence *new_occ, basic_block idom,
230 struct occurrence **p_head)
231 {
232 struct occurrence *occ, **p_occ;
233
234 for (p_occ = p_head; (occ = *p_occ) != NULL; )
235 {
236 basic_block bb = new_occ->bb, occ_bb = occ->bb;
237 basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
238 if (dom == bb)
239 {
240 /* BB dominates OCC_BB. OCC becomes NEW_OCC's child: remove OCC
241 from its list. */
242 *p_occ = occ->next;
243 occ->next = new_occ->children;
244 new_occ->children = occ;
245
246 	     /* Try the next block (it may also be dominated by BB).  */
247 }
248
249 else if (dom == occ_bb)
250 {
251 /* OCC_BB dominates BB. Tail recurse to look deeper. */
252 insert_bb (new_occ, dom, &occ->children);
253 return;
254 }
255
256 else if (dom != idom)
257 {
258 gcc_assert (!dom->aux);
259
260 /* There is a dominator between IDOM and BB, add it and make
261 two children out of NEW_OCC and OCC. First, remove OCC from
262 its list. */
263 *p_occ = occ->next;
264 new_occ->next = occ;
265 occ->next = NULL;
266
267 /* None of the previous blocks has DOM as a dominator: if we tail
268 recursed, we would reexamine them uselessly. Just switch BB with
269 DOM, and go on looking for blocks dominated by DOM. */
270 new_occ = occ_new (dom, new_occ);
271 }
272
273 else
274 {
275 /* Nothing special, go on with the next element. */
276 p_occ = &occ->next;
277 }
278 }
279
280 /* No place was found as a child of IDOM. Make BB a sibling of IDOM. */
281 new_occ->next = *p_head;
282 *p_head = new_occ;
283 }
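
/* A worked example (illustrative, not taken from the sources): suppose
   divisions by the same SSA name occur in blocks B2 and B3, neither of
   which dominates the other, and B1 is their nearest common dominator.
   Registering B2 first makes it the head of the list; registering B3
   then hits the "dom != idom" case above, so a new occurrence for B1 is
   created with B3 and B2 as its children and becomes the new head.  */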
284
285 /* Register that we found a division in BB. */
286
287 static inline void
288 register_division_in (basic_block bb)
289 {
290 struct occurrence *occ;
291
292 occ = (struct occurrence *) bb->aux;
293 if (!occ)
294 {
295 occ = occ_new (bb, NULL);
296 insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
297 }
298
299 occ->bb_has_division = true;
300 occ->num_divisions++;
301 }
302
303
304 /* Compute the number of divisions that postdominate each block in OCC and
305 its children. */
306
307 static void
308 compute_merit (struct occurrence *occ)
309 {
310 struct occurrence *occ_child;
311 basic_block dom = occ->bb;
312
313 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
314 {
315 basic_block bb;
316 if (occ_child->children)
317 compute_merit (occ_child);
318
319 if (flag_exceptions)
320 bb = single_noncomplex_succ (dom);
321 else
322 bb = dom;
323
324 if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
325 occ->num_divisions += occ_child->num_divisions;
326 }
327 }
328
329
330 /* Return whether USE_STMT is a floating-point division by DEF. */
331 static inline bool
332 is_division_by (gimple *use_stmt, tree def)
333 {
334 return is_gimple_assign (use_stmt)
335 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
336 && gimple_assign_rhs2 (use_stmt) == def
337 /* Do not recognize x / x as valid division, as we are getting
338 	    confused later by replacing all immediate uses of x in such
339 a stmt. */
340 && gimple_assign_rhs1 (use_stmt) != def;
341 }
342
343 /* Walk the subset of the dominator tree rooted at OCC, setting the
344 RECIP_DEF field to a definition of 1.0 / DEF that can be used in
345 the given basic block. The field may be left NULL, of course,
346 if it is not possible or profitable to do the optimization.
347
348 DEF_BSI is an iterator pointing at the statement defining DEF.
349 If RECIP_DEF is set, a dominator already has a computation that can
350 be used. */
351
352 static void
353 insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
354 tree def, tree recip_def, int threshold)
355 {
356 tree type;
357 gassign *new_stmt;
358 gimple_stmt_iterator gsi;
359 struct occurrence *occ_child;
360
361 if (!recip_def
362 && (occ->bb_has_division || !flag_trapping_math)
363 && occ->num_divisions >= threshold)
364 {
365 /* Make a variable with the replacement and substitute it. */
366 type = TREE_TYPE (def);
367 recip_def = create_tmp_reg (type, "reciptmp");
368 new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
369 build_one_cst (type), def);
370
371 if (occ->bb_has_division)
372 {
373 /* Case 1: insert before an existing division. */
374 gsi = gsi_after_labels (occ->bb);
375 while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
376 gsi_next (&gsi);
377
378 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
379 }
380 else if (def_gsi && occ->bb == def_gsi->bb)
381 {
382 /* Case 2: insert right after the definition. Note that this will
383 never happen if the definition statement can throw, because in
384 that case the sole successor of the statement's basic block will
385 dominate all the uses as well. */
386 gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
387 }
388 else
389 {
390 /* Case 3: insert in a basic block not containing defs/uses. */
391 gsi = gsi_after_labels (occ->bb);
392 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
393 }
394
395 reciprocal_stats.rdivs_inserted++;
396
397 occ->recip_def_stmt = new_stmt;
398 }
399
400 occ->recip_def = recip_def;
401 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
402 insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
403 }
404
405
406 /* Replace the division at USE_P with a multiplication by the reciprocal, if
407 possible. */
408
409 static inline void
410 replace_reciprocal (use_operand_p use_p)
411 {
412 gimple *use_stmt = USE_STMT (use_p);
413 basic_block bb = gimple_bb (use_stmt);
414 struct occurrence *occ = (struct occurrence *) bb->aux;
415
416 if (optimize_bb_for_speed_p (bb)
417 && occ->recip_def && use_stmt != occ->recip_def_stmt)
418 {
419 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
420 gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
421 SET_USE (use_p, occ->recip_def);
422 fold_stmt_inplace (&gsi);
423 update_stmt (use_stmt);
424 }
425 }
426
427
428 /* Free OCC and return one more "struct occurrence" to be freed. */
429
430 static struct occurrence *
431 free_bb (struct occurrence *occ)
432 {
433 struct occurrence *child, *next;
434
435 /* First get the two pointers hanging off OCC. */
436 next = occ->next;
437 child = occ->children;
438 occ->bb->aux = NULL;
439 occ_pool->remove (occ);
440
441 /* Now ensure that we don't recurse unless it is necessary. */
442 if (!child)
443 return next;
444 else
445 {
446 while (next)
447 next = free_bb (next);
448
449 return child;
450 }
451 }
452
453
454 /* Look for floating-point divisions among DEF's uses, and try to
455 replace them by multiplications with the reciprocal. Add
456 as many statements computing the reciprocal as needed.
457
458 DEF must be a GIMPLE register of a floating-point type. */
459
460 static void
461 execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
462 {
463 use_operand_p use_p;
464 imm_use_iterator use_iter;
465 struct occurrence *occ;
466 int count = 0, threshold;
467
468 gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));
469
470 FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
471 {
472 gimple *use_stmt = USE_STMT (use_p);
473 if (is_division_by (use_stmt, def))
474 {
475 register_division_in (gimple_bb (use_stmt));
476 count++;
477 }
478 }
479
480 /* Do the expensive part only if we can hope to optimize something. */
481 threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
482 if (count >= threshold)
483 {
484 gimple *use_stmt;
485 for (occ = occ_head; occ; occ = occ->next)
486 {
487 compute_merit (occ);
488 insert_reciprocals (def_gsi, occ, def, NULL, threshold);
489 }
490
491 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
492 {
493 if (is_division_by (use_stmt, def))
494 {
495 FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
496 replace_reciprocal (use_p);
497 }
498 }
499 }
500
501 for (occ = occ_head; occ; )
502 occ = free_bb (occ);
503
504 occ_head = NULL;
505 }
506
507 /* Return an internal function that implements the reciprocal of CALL,
508 or IFN_LAST if there is no such function that the target supports. */
509
510 internal_fn
511 internal_fn_reciprocal (gcall *call)
512 {
513 internal_fn ifn;
514
515 switch (gimple_call_combined_fn (call))
516 {
517 CASE_CFN_SQRT:
518 ifn = IFN_RSQRT;
519 break;
520
521 default:
522 return IFN_LAST;
523 }
524
525 tree_pair types = direct_internal_fn_types (ifn, call);
526 if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
527 return IFN_LAST;
528
529 return ifn;
530 }
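
/* Illustrative sketch (SSA names invented) of how this is used by the
   a/func(b) scan in pass_cse_reciprocals::execute below: on a target
   that supports IFN_RSQRT for the mode,

     _4 = sqrtf (y_3);
     t_1 = x_2 / _4;

   becomes

     _4 = .RSQRT (y_3);
     t_1 = x_2 * _4;

   provided every non-debug use of _4 is such a division.  */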
531
532 /* Go through all the floating-point SSA_NAMEs, and call
533 execute_cse_reciprocals_1 on each of them. */
534 namespace {
535
536 const pass_data pass_data_cse_reciprocals =
537 {
538 GIMPLE_PASS, /* type */
539 "recip", /* name */
540 OPTGROUP_NONE, /* optinfo_flags */
541 TV_NONE, /* tv_id */
542 PROP_ssa, /* properties_required */
543 0, /* properties_provided */
544 0, /* properties_destroyed */
545 0, /* todo_flags_start */
546 TODO_update_ssa, /* todo_flags_finish */
547 };
548
549 class pass_cse_reciprocals : public gimple_opt_pass
550 {
551 public:
552 pass_cse_reciprocals (gcc::context *ctxt)
553 : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
554 {}
555
556 /* opt_pass methods: */
557 virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
558 virtual unsigned int execute (function *);
559
560 }; // class pass_cse_reciprocals
561
562 unsigned int
563 pass_cse_reciprocals::execute (function *fun)
564 {
565 basic_block bb;
566 tree arg;
567
568 occ_pool = new object_allocator<occurrence> ("dominators for recip");
569
570 memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
571 calculate_dominance_info (CDI_DOMINATORS);
572 calculate_dominance_info (CDI_POST_DOMINATORS);
573
574 if (flag_checking)
575 FOR_EACH_BB_FN (bb, fun)
576 gcc_assert (!bb->aux);
577
578 for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
579 if (FLOAT_TYPE_P (TREE_TYPE (arg))
580 && is_gimple_reg (arg))
581 {
582 tree name = ssa_default_def (fun, arg);
583 if (name)
584 execute_cse_reciprocals_1 (NULL, name);
585 }
586
587 FOR_EACH_BB_FN (bb, fun)
588 {
589 tree def;
590
591 for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
592 gsi_next (&gsi))
593 {
594 gphi *phi = gsi.phi ();
595 def = PHI_RESULT (phi);
596 if (! virtual_operand_p (def)
597 && FLOAT_TYPE_P (TREE_TYPE (def)))
598 execute_cse_reciprocals_1 (NULL, def);
599 }
600
601 for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
602 gsi_next (&gsi))
603 {
604 gimple *stmt = gsi_stmt (gsi);
605
606 if (gimple_has_lhs (stmt)
607 && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
608 && FLOAT_TYPE_P (TREE_TYPE (def))
609 && TREE_CODE (def) == SSA_NAME)
610 execute_cse_reciprocals_1 (&gsi, def);
611 }
612
613 if (optimize_bb_for_size_p (bb))
614 continue;
615
616 /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b). */
617 for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
618 gsi_next (&gsi))
619 {
620 gimple *stmt = gsi_stmt (gsi);
621
622 if (is_gimple_assign (stmt)
623 && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
624 {
625 tree arg1 = gimple_assign_rhs2 (stmt);
626 gimple *stmt1;
627
628 if (TREE_CODE (arg1) != SSA_NAME)
629 continue;
630
631 stmt1 = SSA_NAME_DEF_STMT (arg1);
632
633 if (is_gimple_call (stmt1)
634 && gimple_call_lhs (stmt1))
635 {
636 bool fail;
637 imm_use_iterator ui;
638 use_operand_p use_p;
639 tree fndecl = NULL_TREE;
640
641 gcall *call = as_a <gcall *> (stmt1);
642 internal_fn ifn = internal_fn_reciprocal (call);
643 if (ifn == IFN_LAST)
644 {
645 fndecl = gimple_call_fndecl (call);
646 if (!fndecl
647 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD)
648 continue;
649 fndecl = targetm.builtin_reciprocal (fndecl);
650 if (!fndecl)
651 continue;
652 }
653
654 /* Check that all uses of the SSA name are divisions,
655 otherwise replacing the defining statement will do
656 the wrong thing. */
657 fail = false;
658 FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
659 {
660 gimple *stmt2 = USE_STMT (use_p);
661 if (is_gimple_debug (stmt2))
662 continue;
663 if (!is_gimple_assign (stmt2)
664 || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
665 || gimple_assign_rhs1 (stmt2) == arg1
666 || gimple_assign_rhs2 (stmt2) != arg1)
667 {
668 fail = true;
669 break;
670 }
671 }
672 if (fail)
673 continue;
674
675 gimple_replace_ssa_lhs (call, arg1);
676 if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
677 {
678 auto_vec<tree, 4> args;
679 for (unsigned int i = 0;
680 i < gimple_call_num_args (call); i++)
681 args.safe_push (gimple_call_arg (call, i));
682 gcall *stmt2;
683 if (ifn == IFN_LAST)
684 stmt2 = gimple_build_call_vec (fndecl, args);
685 else
686 stmt2 = gimple_build_call_internal_vec (ifn, args);
687 gimple_call_set_lhs (stmt2, arg1);
688 if (gimple_vdef (call))
689 {
690 gimple_set_vdef (stmt2, gimple_vdef (call));
691 SSA_NAME_DEF_STMT (gimple_vdef (stmt2)) = stmt2;
692 }
693 gimple_set_vuse (stmt2, gimple_vuse (call));
694 gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
695 gsi_replace (&gsi2, stmt2, true);
696 }
697 else
698 {
699 if (ifn == IFN_LAST)
700 gimple_call_set_fndecl (call, fndecl);
701 else
702 gimple_call_set_internal_fn (call, ifn);
703 update_stmt (call);
704 }
705 reciprocal_stats.rfuncs_inserted++;
706
707 FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
708 {
709 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
710 gimple_assign_set_rhs_code (stmt, MULT_EXPR);
711 fold_stmt_inplace (&gsi);
712 update_stmt (stmt);
713 }
714 }
715 }
716 }
717 }
718
719 statistics_counter_event (fun, "reciprocal divs inserted",
720 reciprocal_stats.rdivs_inserted);
721 statistics_counter_event (fun, "reciprocal functions inserted",
722 reciprocal_stats.rfuncs_inserted);
723
724 free_dominance_info (CDI_DOMINATORS);
725 free_dominance_info (CDI_POST_DOMINATORS);
726 delete occ_pool;
727 return 0;
728 }
729
730 } // anon namespace
731
732 gimple_opt_pass *
733 make_pass_cse_reciprocals (gcc::context *ctxt)
734 {
735 return new pass_cse_reciprocals (ctxt);
736 }
737
738 /* Records an occurrence at statement USE_STMT in the vector STMTS if
739    it is dominated by *TOP_BB, dominates it, or *TOP_BB is not yet
740    initialized.  Returns true if the occurrence was pushed on
741 the vector. Adjusts *TOP_BB to be the basic block dominating all
742 statements in the vector. */
743
744 static bool
745 maybe_record_sincos (vec<gimple *> *stmts,
746 basic_block *top_bb, gimple *use_stmt)
747 {
748 basic_block use_bb = gimple_bb (use_stmt);
749 if (*top_bb
750 && (*top_bb == use_bb
751 || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
752 stmts->safe_push (use_stmt);
753 else if (!*top_bb
754 || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
755 {
756 stmts->safe_push (use_stmt);
757 *top_bb = use_bb;
758 }
759 else
760 return false;
761
762 return true;
763 }
764
765 /* Look for sin, cos and cexpi calls with the same argument NAME and
766 create a single call to cexpi CSEing the result in this case.
767 We first walk over all immediate uses of the argument collecting
768 statements that we can CSE in a vector and in a second pass replace
769 the statement rhs with a REALPART or IMAGPART expression on the
770 result of the cexpi call we insert before the use statement that
771 dominates all other candidates. */
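
/* Illustrative sketch of the rewrite (SSA names invented):

     c_2 = cos (a_1);
     s_3 = sin (a_1);

   becomes

     sincostmp_4 = cexpi (a_1);
     c_2 = REALPART_EXPR <sincostmp_4>;
     s_3 = IMAGPART_EXPR <sincostmp_4>;

   with the cexpi call inserted in TOP_BB, the block dominating all of
   the recorded calls ("sincostmp" is the temporary created below).  */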
772
773 static bool
774 execute_cse_sincos_1 (tree name)
775 {
776 gimple_stmt_iterator gsi;
777 imm_use_iterator use_iter;
778 tree fndecl, res, type;
779 gimple *def_stmt, *use_stmt, *stmt;
780 int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
781 auto_vec<gimple *> stmts;
782 basic_block top_bb = NULL;
783 int i;
784 bool cfg_changed = false;
785
786 type = TREE_TYPE (name);
787 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
788 {
789 if (gimple_code (use_stmt) != GIMPLE_CALL
790 || !gimple_call_lhs (use_stmt))
791 continue;
792
793 switch (gimple_call_combined_fn (use_stmt))
794 {
795 CASE_CFN_COS:
796 seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
797 break;
798
799 CASE_CFN_SIN:
800 seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
801 break;
802
803 CASE_CFN_CEXPI:
804 seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
805 break;
806
807 default:;
808 }
809 }
810
811 if (seen_cos + seen_sin + seen_cexpi <= 1)
812 return false;
813
814 /* Simply insert cexpi at the beginning of top_bb but not earlier than
815 the name def statement. */
816 fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
817 if (!fndecl)
818 return false;
819 stmt = gimple_build_call (fndecl, 1, name);
820 res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
821 gimple_call_set_lhs (stmt, res);
822
823 def_stmt = SSA_NAME_DEF_STMT (name);
824 if (!SSA_NAME_IS_DEFAULT_DEF (name)
825 && gimple_code (def_stmt) != GIMPLE_PHI
826 && gimple_bb (def_stmt) == top_bb)
827 {
828 gsi = gsi_for_stmt (def_stmt);
829 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
830 }
831 else
832 {
833 gsi = gsi_after_labels (top_bb);
834 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
835 }
836 sincos_stats.inserted++;
837
838 /* And adjust the recorded old call sites. */
839 for (i = 0; stmts.iterate (i, &use_stmt); ++i)
840 {
841 tree rhs = NULL;
842
843 switch (gimple_call_combined_fn (use_stmt))
844 {
845 CASE_CFN_COS:
846 rhs = fold_build1 (REALPART_EXPR, type, res);
847 break;
848
849 CASE_CFN_SIN:
850 rhs = fold_build1 (IMAGPART_EXPR, type, res);
851 break;
852
853 CASE_CFN_CEXPI:
854 rhs = res;
855 break;
856
857 default:;
858 gcc_unreachable ();
859 }
860
861 /* Replace call with a copy. */
862 stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);
863
864 gsi = gsi_for_stmt (use_stmt);
865 gsi_replace (&gsi, stmt, true);
866 if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
867 cfg_changed = true;
868 }
869
870 return cfg_changed;
871 }
872
873 /* To evaluate powi(x,n), the floating point value x raised to the
874 constant integer exponent n, we use a hybrid algorithm that
875 combines the "window method" with look-up tables. For an
876 introduction to exponentiation algorithms and "addition chains",
877 see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
878 "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
879 3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
880 Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998. */
881
882 /* Provide a default value for POWI_MAX_MULTS, the maximum number of
883 multiplications to inline before calling the system library's pow
884 function. powi(x,n) requires at worst 2*bits(n)-2 multiplications,
885 so this default never requires calling pow, powf or powl. */
886
887 #ifndef POWI_MAX_MULTS
888 #define POWI_MAX_MULTS (2*HOST_BITS_PER_WIDE_INT-2)
889 #endif
890
891 /* The size of the "optimal power tree" lookup table. All
892 exponents less than this value are simply looked up in the
893 powi_table below. This threshold is also used to size the
894 cache of pseudo registers that hold intermediate results. */
895 #define POWI_TABLE_SIZE 256
896
897 /* The size, in bits, of the window used in the "window method"
898    exponentiation algorithm.  This is equivalent to a radix of
899 (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method". */
900 #define POWI_WINDOW_SIZE 3
901
902 /* The following table is an efficient representation of an
903 "optimal power tree". For each value, i, the corresponding
904    value, j, in the table states that an optimal evaluation
905 sequence for calculating pow(x,i) can be found by evaluating
906 pow(x,j)*pow(x,i-j). An optimal power tree for the first
907 100 integers is given in Knuth's "Seminumerical algorithms". */
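
/* A worked example (reading the table below): powi_table[7] == 4, so
   x**7 is evaluated as x**4 * x**3; in turn x**4 == x**2 * x**2 and
   x**3 == x**2 * x, for a total of 4 multiplications once the shared
   x**2 is reused.  */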
908
909 static const unsigned char powi_table[POWI_TABLE_SIZE] =
910 {
911 0, 1, 1, 2, 2, 3, 3, 4, /* 0 - 7 */
912 4, 6, 5, 6, 6, 10, 7, 9, /* 8 - 15 */
913 8, 16, 9, 16, 10, 12, 11, 13, /* 16 - 23 */
914 12, 17, 13, 18, 14, 24, 15, 26, /* 24 - 31 */
915 16, 17, 17, 19, 18, 33, 19, 26, /* 32 - 39 */
916 20, 25, 21, 40, 22, 27, 23, 44, /* 40 - 47 */
917 24, 32, 25, 34, 26, 29, 27, 44, /* 48 - 55 */
918 28, 31, 29, 34, 30, 60, 31, 36, /* 56 - 63 */
919 32, 64, 33, 34, 34, 46, 35, 37, /* 64 - 71 */
920 36, 65, 37, 50, 38, 48, 39, 69, /* 72 - 79 */
921 40, 49, 41, 43, 42, 51, 43, 58, /* 80 - 87 */
922 44, 64, 45, 47, 46, 59, 47, 76, /* 88 - 95 */
923 48, 65, 49, 66, 50, 67, 51, 66, /* 96 - 103 */
924 52, 70, 53, 74, 54, 104, 55, 74, /* 104 - 111 */
925 56, 64, 57, 69, 58, 78, 59, 68, /* 112 - 119 */
926 60, 61, 61, 80, 62, 75, 63, 68, /* 120 - 127 */
927 64, 65, 65, 128, 66, 129, 67, 90, /* 128 - 135 */
928 68, 73, 69, 131, 70, 94, 71, 88, /* 136 - 143 */
929 72, 128, 73, 98, 74, 132, 75, 121, /* 144 - 151 */
930 76, 102, 77, 124, 78, 132, 79, 106, /* 152 - 159 */
931 80, 97, 81, 160, 82, 99, 83, 134, /* 160 - 167 */
932 84, 86, 85, 95, 86, 160, 87, 100, /* 168 - 175 */
933 88, 113, 89, 98, 90, 107, 91, 122, /* 176 - 183 */
934 92, 111, 93, 102, 94, 126, 95, 150, /* 184 - 191 */
935 96, 128, 97, 130, 98, 133, 99, 195, /* 192 - 199 */
936 100, 128, 101, 123, 102, 164, 103, 138, /* 200 - 207 */
937 104, 145, 105, 146, 106, 109, 107, 149, /* 208 - 215 */
938 108, 200, 109, 146, 110, 170, 111, 157, /* 216 - 223 */
939 112, 128, 113, 130, 114, 182, 115, 132, /* 224 - 231 */
940 116, 200, 117, 132, 118, 158, 119, 206, /* 232 - 239 */
941 120, 240, 121, 162, 122, 147, 123, 152, /* 240 - 247 */
942 124, 166, 125, 214, 126, 138, 127, 153, /* 248 - 255 */
943 };
944
945
946 /* Return the number of multiplications required to calculate
947 powi(x,n) where n is less than POWI_TABLE_SIZE. This is a
948 subroutine of powi_cost. CACHE is an array indicating
949 which exponents have already been calculated. */
950
951 static int
952 powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
953 {
954 /* If we've already calculated this exponent, then this evaluation
955 doesn't require any additional multiplications. */
956 if (cache[n])
957 return 0;
958
959 cache[n] = true;
960 return powi_lookup_cost (n - powi_table[n], cache)
961 + powi_lookup_cost (powi_table[n], cache) + 1;
962 }
963
964 /* Return the number of multiplications required to calculate
965 powi(x,n) for an arbitrary x, given the exponent N. This
966 function needs to be kept in sync with powi_as_mults below. */
967
968 static int
969 powi_cost (HOST_WIDE_INT n)
970 {
971 bool cache[POWI_TABLE_SIZE];
972 unsigned HOST_WIDE_INT digit;
973 unsigned HOST_WIDE_INT val;
974 int result;
975
976 if (n == 0)
977 return 0;
978
979 /* Ignore the reciprocal when calculating the cost. */
980 val = (n < 0) ? -n : n;
981
982 /* Initialize the exponent cache. */
983 memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
984 cache[1] = true;
985
986 result = 0;
987
988 while (val >= POWI_TABLE_SIZE)
989 {
990 if (val & 1)
991 {
992 digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
993 result += powi_lookup_cost (digit, cache)
994 + POWI_WINDOW_SIZE + 1;
995 val >>= POWI_WINDOW_SIZE;
996 }
997 else
998 {
999 val >>= 1;
1000 result++;
1001 }
1002 }
1003
1004 return result + powi_lookup_cost (val, cache);
1005 }
1006
1007 /* Recursive subroutine of powi_as_mults. This function takes the
1008 array, CACHE, of already calculated exponents and an exponent N and
1009 returns a tree that corresponds to CACHE[1]**N, with type TYPE. */
1010
1011 static tree
1012 powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
1013 HOST_WIDE_INT n, tree *cache)
1014 {
1015 tree op0, op1, ssa_target;
1016 unsigned HOST_WIDE_INT digit;
1017 gassign *mult_stmt;
1018
1019 if (n < POWI_TABLE_SIZE && cache[n])
1020 return cache[n];
1021
1022 ssa_target = make_temp_ssa_name (type, NULL, "powmult");
1023
1024 if (n < POWI_TABLE_SIZE)
1025 {
1026 cache[n] = ssa_target;
1027 op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
1028 op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
1029 }
1030 else if (n & 1)
1031 {
1032 digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
1033 op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
1034 op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
1035 }
1036 else
1037 {
1038 op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
1039 op1 = op0;
1040 }
1041
1042 mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
1043 gimple_set_location (mult_stmt, loc);
1044 gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);
1045
1046 return ssa_target;
1047 }
1048
1049 /* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
1050 This function needs to be kept in sync with powi_cost above. */
1051
1052 static tree
1053 powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
1054 tree arg0, HOST_WIDE_INT n)
1055 {
1056 tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
1057 gassign *div_stmt;
1058 tree target;
1059
1060 if (n == 0)
1061 return build_real (type, dconst1);
1062
1063 memset (cache, 0, sizeof (cache));
1064 cache[1] = arg0;
1065
1066 result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
1067 if (n >= 0)
1068 return result;
1069
1070 /* If the original exponent was negative, reciprocate the result. */
1071 target = make_temp_ssa_name (type, NULL, "powmult");
1072 div_stmt = gimple_build_assign (target, RDIV_EXPR,
1073 build_real (type, dconst1), result);
1074 gimple_set_location (div_stmt, loc);
1075 gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);
1076
1077 return target;
1078 }
1079
1080 /* ARG0 and N are the two arguments to a powi builtin in GSI with
1081 location info LOC. If the arguments are appropriate, create an
1082 equivalent sequence of statements prior to GSI using an optimal
1083    number of multiplications, and return an expression holding the
1084 result. */
1085
1086 static tree
1087 gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
1088 tree arg0, HOST_WIDE_INT n)
1089 {
1090 /* Avoid largest negative number. */
1091 if (n != -n
1092 && ((n >= -1 && n <= 2)
1093 || (optimize_function_for_speed_p (cfun)
1094 && powi_cost (n) <= POWI_MAX_MULTS)))
1095 return powi_as_mults (gsi, loc, arg0, n);
1096
1097 return NULL_TREE;
1098 }
1099
1100 /* Build a gimple call statement that calls FN with argument ARG.
1101 Set the lhs of the call statement to a fresh SSA name. Insert the
1102 statement prior to GSI's current position, and return the fresh
1103 SSA name. */
1104
1105 static tree
1106 build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
1107 tree fn, tree arg)
1108 {
1109 gcall *call_stmt;
1110 tree ssa_target;
1111
1112 call_stmt = gimple_build_call (fn, 1, arg);
1113 ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
1114 gimple_set_lhs (call_stmt, ssa_target);
1115 gimple_set_location (call_stmt, loc);
1116 gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);
1117
1118 return ssa_target;
1119 }
1120
1121 /* Build a gimple binary operation with the given CODE and arguments
1122 ARG0, ARG1, assigning the result to a new SSA name for variable
1123 TARGET. Insert the statement prior to GSI's current position, and
1124    return the fresh SSA name.  */
1125
1126 static tree
1127 build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
1128 const char *name, enum tree_code code,
1129 tree arg0, tree arg1)
1130 {
1131 tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
1132 gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
1133 gimple_set_location (stmt, loc);
1134 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1135 return result;
1136 }
1137
1138 /* Build a gimple reference operation with the given CODE and argument
1139 ARG, assigning the result to a new SSA name of TYPE with NAME.
1140 Insert the statement prior to GSI's current position, and return
1141 the fresh SSA name. */
1142
1143 static inline tree
1144 build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
1145 const char *name, enum tree_code code, tree arg0)
1146 {
1147 tree result = make_temp_ssa_name (type, NULL, name);
1148 gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
1149 gimple_set_location (stmt, loc);
1150 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1151 return result;
1152 }
1153
1154 /* Build a gimple assignment to cast VAL to TYPE. Insert the statement
1155 prior to GSI's current position, and return the fresh SSA name. */
1156
1157 static tree
1158 build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
1159 tree type, tree val)
1160 {
1161 tree result = make_ssa_name (type);
1162 gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
1163 gimple_set_location (stmt, loc);
1164 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1165 return result;
1166 }
1167
1168 struct pow_synth_sqrt_info
1169 {
1170 bool *factors;
1171 unsigned int deepest;
1172 unsigned int num_mults;
1173 };
1174
1175 /* Return true iff the real value C can be represented as a
1176 sum of powers of 0.5 up to N. That is:
1177 C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
1178 Record in INFO the various parameters of the synthesis algorithm such
1179 as the factors a[i], the maximum 0.5 power and the number of
1180 multiplications that will be required. */
1181
1182 bool
1183 representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
1184 struct pow_synth_sqrt_info *info)
1185 {
1186 REAL_VALUE_TYPE factor = dconsthalf;
1187 REAL_VALUE_TYPE remainder = c;
1188
1189 info->deepest = 0;
1190 info->num_mults = 0;
1191 memset (info->factors, 0, n * sizeof (bool));
1192
1193 for (unsigned i = 0; i < n; i++)
1194 {
1195 REAL_VALUE_TYPE res;
1196
1197 /* If something inexact happened bail out now. */
1198 if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
1199 return false;
1200
1201 /* We have hit zero. The number is representable as a sum
1202 of powers of 0.5. */
1203 if (real_equal (&res, &dconst0))
1204 {
1205 info->factors[i] = true;
1206 info->deepest = i + 1;
1207 return true;
1208 }
1209 else if (!REAL_VALUE_NEGATIVE (res))
1210 {
1211 remainder = res;
1212 info->factors[i] = true;
1213 info->num_mults++;
1214 }
1215 else
1216 info->factors[i] = false;
1217
1218 real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
1219 }
1220 return false;
1221 }
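
/* A worked example (illustrative): for C == 0.625 and N >= 3 the loop
   above finds 0.625 == 0.5 + 0.125, so factors[0] and factors[2] are
   set, deepest == 3 and num_mults == 1, corresponding to the product
   sqrt (x) * sqrt (sqrt (sqrt (x))).  */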
1222
1223 /* Return the tree corresponding to FN being applied
1224 to ARG N times at GSI and LOC.
1225 Look up previous results from CACHE if need be.
1226 cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times. */
1227
1228 static tree
1229 get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
1230 tree fn, location_t loc, tree *cache)
1231 {
1232 tree res = cache[n];
1233 if (!res)
1234 {
1235 tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
1236 res = build_and_insert_call (gsi, loc, fn, prev);
1237 cache[n] = res;
1238 }
1239
1240 return res;
1241 }
1242
1243 /* Print to STREAM the repeated application of function FNAME to ARG
1244 N times. So, for FNAME = "foo", ARG = "x", N = 2 it would print:
1245 "foo (foo (x))". */
1246
1247 static void
1248 print_nested_fn (FILE* stream, const char *fname, const char* arg,
1249 unsigned int n)
1250 {
1251 if (n == 0)
1252 fprintf (stream, "%s", arg);
1253 else
1254 {
1255 fprintf (stream, "%s (", fname);
1256 print_nested_fn (stream, fname, arg, n - 1);
1257 fprintf (stream, ")");
1258 }
1259 }
1260
1261 /* Print to STREAM the fractional sequence of sqrt chains
1262 applied to ARG, described by INFO. Used for the dump file. */
1263
1264 static void
1265 dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
1266 struct pow_synth_sqrt_info *info)
1267 {
1268 for (unsigned int i = 0; i < info->deepest; i++)
1269 {
1270 bool is_set = info->factors[i];
1271 if (is_set)
1272 {
1273 print_nested_fn (stream, "sqrt", arg, i + 1);
1274 if (i != info->deepest - 1)
1275 fprintf (stream, " * ");
1276 }
1277 }
1278 }
1279
1280 /* Print to STREAM a representation of raising ARG to an integer
1281 power N. Used for the dump file. */
1282
1283 static void
1284 dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
1285 {
1286 if (n > 1)
1287 fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
1288 else if (n == 1)
1289 fprintf (stream, "%s", arg);
1290 }
1291
1292 /* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
1293 square roots. Place at GSI and LOC. Limit the maximum depth
1294 of the sqrt chains to MAX_DEPTH. Return the tree holding the
1295 result of the expanded sequence or NULL_TREE if the expansion failed.
1296
1297 This routine assumes that ARG1 is a real number with a fractional part
1298 (the integer exponent case will have been handled earlier in
1299 gimple_expand_builtin_pow).
1300
1301 For ARG1 > 0.0:
1302 * For ARG1 composed of a whole part WHOLE_PART and a fractional part
1303 FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
1304 FRAC_PART == ARG1 - WHOLE_PART:
1305 Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
1306 POW (ARG0, FRAC_PART) is expanded as a product of square root chains
1307 if it can be expressed as such, that is if FRAC_PART satisfies:
1308 FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
1309 where integer a[i] is either 0 or 1.
1310
1311 Example:
1312 POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
1313 --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))
1314
1315 For ARG1 < 0.0 there are two approaches:
1316 * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
1317 is calculated as above.
1318
1319 Example:
1320 POW (x, -5.625) == 1.0 / POW (x, 5.625)
1321 --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))
1322
1323 * (B) : WHOLE_PART := - ceil (abs (ARG1))
1324 FRAC_PART := ARG1 - WHOLE_PART
1325 and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
1326 Example:
1327 POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
1328 --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))
1329
1330 For ARG1 < 0.0 we choose between (A) and (B) depending on
1331 how many multiplications we'd have to do.
1332 So, for the example in (B): POW (x, -5.875), if we were to
1333 follow algorithm (A) we would produce:
1334 1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
1335 which contains more multiplications than approach (B).
1336
1337 Hopefully, this approach will eliminate potentially expensive POW library
1338 calls when unsafe floating point math is enabled and allow the compiler to
1339 further optimise the multiplies, square roots and divides produced by this
1340 function. */
1341
1342 static tree
1343 expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
1344 tree arg0, tree arg1, HOST_WIDE_INT max_depth)
1345 {
1346 tree type = TREE_TYPE (arg0);
1347 machine_mode mode = TYPE_MODE (type);
1348 tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1349 bool one_over = true;
1350
1351 if (!sqrtfn)
1352 return NULL_TREE;
1353
1354 if (TREE_CODE (arg1) != REAL_CST)
1355 return NULL_TREE;
1356
1357 REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);
1358
1359 gcc_assert (max_depth > 0);
1360 tree *cache = XALLOCAVEC (tree, max_depth + 1);
1361
1362 struct pow_synth_sqrt_info synth_info;
1363 synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
1364 synth_info.deepest = 0;
1365 synth_info.num_mults = 0;
1366
1367 bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
1368 REAL_VALUE_TYPE exp = real_value_abs (&exp_init);
1369
1370 /* The whole and fractional parts of exp. */
1371 REAL_VALUE_TYPE whole_part;
1372 REAL_VALUE_TYPE frac_part;
1373
1374 real_floor (&whole_part, mode, &exp);
1375 real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part);
1376
1377
1378 REAL_VALUE_TYPE ceil_whole = dconst0;
1379 REAL_VALUE_TYPE ceil_fract = dconst0;
1380
1381 if (neg_exp)
1382 {
1383 real_ceil (&ceil_whole, mode, &exp);
1384 real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp);
1385 }
1386
1387 if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
1388 return NULL_TREE;
1389
1390 /* Check whether it's more profitable to not use 1.0 / ... */
1391 if (neg_exp)
1392 {
1393 struct pow_synth_sqrt_info alt_synth_info;
1394 alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
1395 alt_synth_info.deepest = 0;
1396 alt_synth_info.num_mults = 0;
1397
1398 if (representable_as_half_series_p (ceil_fract, max_depth,
1399 &alt_synth_info)
1400 && alt_synth_info.deepest <= synth_info.deepest
1401 && alt_synth_info.num_mults < synth_info.num_mults)
1402 {
1403 whole_part = ceil_whole;
1404 frac_part = ceil_fract;
1405 synth_info.deepest = alt_synth_info.deepest;
1406 synth_info.num_mults = alt_synth_info.num_mults;
1407 memcpy (synth_info.factors, alt_synth_info.factors,
1408 (max_depth + 1) * sizeof (bool));
1409 one_over = false;
1410 }
1411 }
1412
1413 HOST_WIDE_INT n = real_to_integer (&whole_part);
1414 REAL_VALUE_TYPE cint;
1415 real_from_integer (&cint, VOIDmode, n, SIGNED);
1416
1417 if (!real_identical (&whole_part, &cint))
1418 return NULL_TREE;
1419
1420 if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
1421 return NULL_TREE;
1422
1423 memset (cache, 0, (max_depth + 1) * sizeof (tree));
1424
1425 tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;
1426
1427 /* Calculate the integer part of the exponent. */
1428 if (n > 1)
1429 {
1430 integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
1431 if (!integer_res)
1432 return NULL_TREE;
1433 }
1434
1435 if (dump_file)
1436 {
1437 char string[64];
1438
1439 real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
1440 fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);
1441
1442 if (neg_exp)
1443 {
1444 if (one_over)
1445 {
1446 fprintf (dump_file, "1.0 / (");
1447 dump_integer_part (dump_file, "x", n);
1448 if (n > 0)
1449 fprintf (dump_file, " * ");
1450 dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1451 fprintf (dump_file, ")");
1452 }
1453 else
1454 {
1455 dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1456 fprintf (dump_file, " / (");
1457 dump_integer_part (dump_file, "x", n);
1458 fprintf (dump_file, ")");
1459 }
1460 }
1461 else
1462 {
1463 dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1464 if (n > 0)
1465 fprintf (dump_file, " * ");
1466 dump_integer_part (dump_file, "x", n);
1467 }
1468
1469 fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
1470 }
1471
1472
1473 tree fract_res = NULL_TREE;
1474 cache[0] = arg0;
1475
1476 /* Calculate the fractional part of the exponent. */
1477 for (unsigned i = 0; i < synth_info.deepest; i++)
1478 {
1479 if (synth_info.factors[i])
1480 {
1481 tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);
1482
1483 if (!fract_res)
1484 fract_res = sqrt_chain;
1485
1486 else
1487 fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1488 fract_res, sqrt_chain);
1489 }
1490 }
1491
1492 tree res = NULL_TREE;
1493
1494 if (neg_exp)
1495 {
1496 if (one_over)
1497 {
1498 if (n > 0)
1499 res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1500 fract_res, integer_res);
1501 else
1502 res = fract_res;
1503
1504 res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
1505 build_real (type, dconst1), res);
1506 }
1507 else
1508 {
1509 res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1510 fract_res, integer_res);
1511 }
1512 }
1513 else
1514 res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1515 fract_res, integer_res);
1516 return res;
1517 }
1518
1519 /* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
1520 with location info LOC. If possible, create an equivalent and
1521 less expensive sequence of statements prior to GSI, and return an
1522    expression holding the result.  */
1523
1524 static tree
1525 gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
1526 tree arg0, tree arg1)
1527 {
1528 REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
1529 REAL_VALUE_TYPE c2, dconst3;
1530 HOST_WIDE_INT n;
1531 tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
1532 machine_mode mode;
1533 bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
1534 bool hw_sqrt_exists, c_is_int, c2_is_int;
1535
1536 dconst1_4 = dconst1;
1537 SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
1538
1539 /* If the exponent isn't a constant, there's nothing of interest
1540 to be done. */
1541 if (TREE_CODE (arg1) != REAL_CST)
1542 return NULL_TREE;
1543
1544 /* Don't perform the operation if flag_signaling_nans is on
1545 and the operand is a signaling NaN. */
1546 if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))
1547 && ((TREE_CODE (arg0) == REAL_CST
1548 && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0)))
1549 || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1))))
1550 return NULL_TREE;
1551
1552 /* If the exponent is equivalent to an integer, expand to an optimal
1553 multiplication sequence when profitable. */
1554 c = TREE_REAL_CST (arg1);
1555 n = real_to_integer (&c);
1556 real_from_integer (&cint, VOIDmode, n, SIGNED);
1557 c_is_int = real_identical (&c, &cint);
1558
1559 if (c_is_int
1560 && ((n >= -1 && n <= 2)
1561 || (flag_unsafe_math_optimizations
1562 && speed_p
1563 && powi_cost (n) <= POWI_MAX_MULTS)))
1564 return gimple_expand_builtin_powi (gsi, loc, arg0, n);
1565
1566 /* Attempt various optimizations using sqrt and cbrt. */
1567 type = TREE_TYPE (arg0);
1568 mode = TYPE_MODE (type);
1569 sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1570
1571 /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe
1572 unless signed zeros must be maintained. pow(-0,0.5) = +0, while
1573 sqrt(-0) = -0. */
1574 if (sqrtfn
1575 && real_equal (&c, &dconsthalf)
1576 && !HONOR_SIGNED_ZEROS (mode))
1577 return build_and_insert_call (gsi, loc, sqrtfn, arg0);
1578
1579 hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;
1580
1581 /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math
1582 optimizations since 1./3. is not exactly representable. If x
1583 is negative and finite, the correct value of pow(x,1./3.) is
1584 a NaN with the "invalid" exception raised, because the value
1585 of 1./3. actually has an even denominator. The correct value
1586 of cbrt(x) is a negative real value. */
1587 cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
1588 dconst1_3 = real_value_truncate (mode, dconst_third ());
1589
1590 if (flag_unsafe_math_optimizations
1591 && cbrtfn
1592 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1593 && real_equal (&c, &dconst1_3))
1594 return build_and_insert_call (gsi, loc, cbrtfn, arg0);
1595
1596 /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization
1597 if we don't have a hardware sqrt insn. */
1598 dconst1_6 = dconst1_3;
1599 SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);
1600
1601 if (flag_unsafe_math_optimizations
1602 && sqrtfn
1603 && cbrtfn
1604 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1605 && speed_p
1606 && hw_sqrt_exists
1607 && real_equal (&c, &dconst1_6))
1608 {
1609 /* sqrt(x) */
1610 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1611
1612 /* cbrt(sqrt(x)) */
1613 return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
1614 }
1615
1616
1617 /* Attempt to expand the POW as a product of square root chains.
1618    Expand the 0.25 case even when optimising for size.  */
1619 if (flag_unsafe_math_optimizations
1620 && sqrtfn
1621 && hw_sqrt_exists
1622 && (speed_p || real_equal (&c, &dconst1_4))
1623 && !HONOR_SIGNED_ZEROS (mode))
1624 {
1625 unsigned int max_depth = speed_p
1626 ? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
1627 : 2;
1628
1629 tree expand_with_sqrts
1630 = expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);
1631
1632 if (expand_with_sqrts)
1633 return expand_with_sqrts;
1634 }
1635
1636 real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
1637 n = real_to_integer (&c2);
1638 real_from_integer (&cint, VOIDmode, n, SIGNED);
1639 c2_is_int = real_identical (&c2, &cint);
1640
1641 /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
1642
1643 powi(x, n/3) * powi(cbrt(x), n%3), n > 0;
1644 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0.
1645
1646 Do not calculate the first factor when n/3 = 0. As cbrt(x) is
1647 different from pow(x, 1./3.) due to rounding and behavior with
1648 negative x, we need to constrain this transformation to unsafe
1649 math and positive x or finite math. */
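  /* For instance (a sketch): pow (x, 5./3.) has 3c == n == 5, so it is
     expanded to powi (x, 1) * powi (cbrt (x), 2), i.e.
     x * (cbrt (x) * cbrt (x)).  */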
1650 real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
1651 real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
1652 real_round (&c2, mode, &c2);
1653 n = real_to_integer (&c2);
1654 real_from_integer (&cint, VOIDmode, n, SIGNED);
1655 real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
1656 real_convert (&c2, mode, &c2);
1657
1658 if (flag_unsafe_math_optimizations
1659 && cbrtfn
1660 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1661 && real_identical (&c2, &c)
1662 && !c2_is_int
1663 && optimize_function_for_speed_p (cfun)
1664 && powi_cost (n / 3) <= POWI_MAX_MULTS)
1665 {
1666 tree powi_x_ndiv3 = NULL_TREE;
1667
1668 /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not
1669 possible or profitable, give up. Skip the degenerate case when
1670 abs(n) < 3, where the result is always 1. */
1671 if (absu_hwi (n) >= 3)
1672 {
1673 powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
1674 abs_hwi (n / 3));
1675 if (!powi_x_ndiv3)
1676 return NULL_TREE;
1677 }
1678
1679 /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi
1680 as that creates an unnecessary variable. Instead, just produce
1681 either cbrt(x) or cbrt(x) * cbrt(x). */
1682 cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);
1683
1684 if (absu_hwi (n) % 3 == 1)
1685 powi_cbrt_x = cbrt_x;
1686 else
1687 powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1688 cbrt_x, cbrt_x);
1689
1690 /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */
1691 if (absu_hwi (n) < 3)
1692 result = powi_cbrt_x;
1693 else
1694 result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1695 powi_x_ndiv3, powi_cbrt_x);
1696
1697 /* If n is negative, reciprocate the result. */
1698 if (n < 0)
1699 result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1700 build_real (type, dconst1), result);
1701
1702 return result;
1703 }
1704
1705 /* No optimizations succeeded. */
1706 return NULL_TREE;
1707 }
1708
1709 /* ARG is the argument to a cabs builtin call in GSI with location info
1710 LOC. Create a sequence of statements prior to GSI that calculates
1711 sqrt(R*R + I*I), where R and I are the real and imaginary components
1712 of ARG, respectively. Return an expression holding the result. */
1713
1714 static tree
1715 gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
1716 {
1717 tree real_part, imag_part, addend1, addend2, sum, result;
1718 tree type = TREE_TYPE (TREE_TYPE (arg));
1719 tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1720 machine_mode mode = TYPE_MODE (type);
1721
1722 if (!flag_unsafe_math_optimizations
1723 || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
1724 || !sqrtfn
1725 || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
1726 return NULL_TREE;
1727
1728 real_part = build_and_insert_ref (gsi, loc, type, "cabs",
1729 REALPART_EXPR, arg);
1730 addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1731 real_part, real_part);
1732 imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
1733 IMAGPART_EXPR, arg);
1734 addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1735 imag_part, imag_part);
1736 sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
1737 result = build_and_insert_call (gsi, loc, sqrtfn, sum);
1738
1739 return result;
1740 }
1741
1742 /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
1743    on the SSA_NAME argument of each of them.  Also expand powi (x, n) with
1744    constant n, as well as pow and cabs, into cheaper sequences.  */
1745
1746 namespace {
1747
1748 const pass_data pass_data_cse_sincos =
1749 {
1750 GIMPLE_PASS, /* type */
1751 "sincos", /* name */
1752 OPTGROUP_NONE, /* optinfo_flags */
1753 TV_NONE, /* tv_id */
1754 PROP_ssa, /* properties_required */
1755 PROP_gimple_opt_math, /* properties_provided */
1756 0, /* properties_destroyed */
1757 0, /* todo_flags_start */
1758 TODO_update_ssa, /* todo_flags_finish */
1759 };
1760
1761 class pass_cse_sincos : public gimple_opt_pass
1762 {
1763 public:
1764 pass_cse_sincos (gcc::context *ctxt)
1765 : gimple_opt_pass (pass_data_cse_sincos, ctxt)
1766 {}
1767
1768 /* opt_pass methods: */
1769 virtual bool gate (function *)
1770 {
1771 /* We no longer require either sincos or cexp, since powi expansion
1772 piggybacks on this pass. */
1773 return optimize;
1774 }
1775
1776 virtual unsigned int execute (function *);
1777
1778 }; // class pass_cse_sincos
1779
1780 unsigned int
1781 pass_cse_sincos::execute (function *fun)
1782 {
1783 basic_block bb;
1784 bool cfg_changed = false;
1785
1786 calculate_dominance_info (CDI_DOMINATORS);
1787 memset (&sincos_stats, 0, sizeof (sincos_stats));
1788
1789 FOR_EACH_BB_FN (bb, fun)
1790 {
1791 gimple_stmt_iterator gsi;
1792 bool cleanup_eh = false;
1793
1794 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1795 {
1796 gimple *stmt = gsi_stmt (gsi);
1797
1798 /* Only the last stmt in a bb could throw, no need to call
1799 gimple_purge_dead_eh_edges if we change something in the middle
1800 of a basic block. */
1801 cleanup_eh = false;
1802
1803 if (is_gimple_call (stmt)
1804 && gimple_call_lhs (stmt))
1805 {
1806 tree arg, arg0, arg1, result;
1807 HOST_WIDE_INT n;
1808 location_t loc;
1809
1810 switch (gimple_call_combined_fn (stmt))
1811 {
1812 CASE_CFN_COS:
1813 CASE_CFN_SIN:
1814 CASE_CFN_CEXPI:
1815 /* Make sure we have either sincos or cexp. */
1816 if (!targetm.libc_has_function (function_c99_math_complex)
1817 && !targetm.libc_has_function (function_sincos))
1818 break;
1819
1820 arg = gimple_call_arg (stmt, 0);
1821 if (TREE_CODE (arg) == SSA_NAME)
1822 cfg_changed |= execute_cse_sincos_1 (arg);
1823 break;
1824
1825 CASE_CFN_POW:
1826 arg0 = gimple_call_arg (stmt, 0);
1827 arg1 = gimple_call_arg (stmt, 1);
1828
1829 loc = gimple_location (stmt);
1830 result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);
1831
1832 if (result)
1833 {
1834 tree lhs = gimple_get_lhs (stmt);
1835 gassign *new_stmt = gimple_build_assign (lhs, result);
1836 gimple_set_location (new_stmt, loc);
1837 unlink_stmt_vdef (stmt);
1838 gsi_replace (&gsi, new_stmt, true);
1839 cleanup_eh = true;
1840 if (gimple_vdef (stmt))
1841 release_ssa_name (gimple_vdef (stmt));
1842 }
1843 break;
1844
1845 CASE_CFN_POWI:
1846 arg0 = gimple_call_arg (stmt, 0);
1847 arg1 = gimple_call_arg (stmt, 1);
1848 loc = gimple_location (stmt);
1849
1850 if (real_minus_onep (arg0))
1851 {
1852 tree t0, t1, cond, one, minus_one;
1853 gassign *stmt;
1854
1855 t0 = TREE_TYPE (arg0);
1856 t1 = TREE_TYPE (arg1);
1857 one = build_real (t0, dconst1);
1858 minus_one = build_real (t0, dconstm1);
1859
1860 cond = make_temp_ssa_name (t1, NULL, "powi_cond");
1861 stmt = gimple_build_assign (cond, BIT_AND_EXPR,
1862 arg1, build_int_cst (t1, 1));
1863 gimple_set_location (stmt, loc);
1864 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1865
1866 result = make_temp_ssa_name (t0, NULL, "powi");
1867 stmt = gimple_build_assign (result, COND_EXPR, cond,
1868 minus_one, one);
1869 gimple_set_location (stmt, loc);
1870 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1871 }
1872 else
1873 {
1874 if (!tree_fits_shwi_p (arg1))
1875 break;
1876
1877 n = tree_to_shwi (arg1);
1878 result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
1879 }
1880
1881 if (result)
1882 {
1883 tree lhs = gimple_get_lhs (stmt);
1884 gassign *new_stmt = gimple_build_assign (lhs, result);
1885 gimple_set_location (new_stmt, loc);
1886 unlink_stmt_vdef (stmt);
1887 gsi_replace (&gsi, new_stmt, true);
1888 cleanup_eh = true;
1889 if (gimple_vdef (stmt))
1890 release_ssa_name (gimple_vdef (stmt));
1891 }
1892 break;
1893
1894 CASE_CFN_CABS:
1895 arg0 = gimple_call_arg (stmt, 0);
1896 loc = gimple_location (stmt);
1897 result = gimple_expand_builtin_cabs (&gsi, loc, arg0);
1898
1899 if (result)
1900 {
1901 tree lhs = gimple_get_lhs (stmt);
1902 gassign *new_stmt = gimple_build_assign (lhs, result);
1903 gimple_set_location (new_stmt, loc);
1904 unlink_stmt_vdef (stmt);
1905 gsi_replace (&gsi, new_stmt, true);
1906 cleanup_eh = true;
1907 if (gimple_vdef (stmt))
1908 release_ssa_name (gimple_vdef (stmt));
1909 }
1910 break;
1911
1912 default:;
1913 }
1914 }
1915 }
1916 if (cleanup_eh)
1917 cfg_changed |= gimple_purge_dead_eh_edges (bb);
1918 }
1919
1920 statistics_counter_event (fun, "sincos statements inserted",
1921 sincos_stats.inserted);
1922
1923 return cfg_changed ? TODO_cleanup_cfg : 0;
1924 }
1925
1926 } // anon namespace
1927
1928 gimple_opt_pass *
1929 make_pass_cse_sincos (gcc::context *ctxt)
1930 {
1931 return new pass_cse_sincos (ctxt);
1932 }
1933
1934 /* A symbolic number is used to detect byte permutation and selection
1935 patterns. Therefore the field N contains an artificial number
1936 consisting of octet sized markers:
1937
1938 0 - target byte has the value 0
1939 FF - target byte has an unknown value (eg. due to sign extension)
1940 1..size - marker value is the target byte index plus one (the lsb being index 0).
1941
1942 To detect permutations on memory sources (arrays and structures), a symbolic
1943 number is also associated with a base address (the array or structure the
1944 load is made from), an offset from the base address and a range which gives
1945 the difference between the highest and lowest accessed memory locations used
1946 to make up the symbolic number. The range is thus different from size, which
1947 reflects the size of the type of the current expression. Note that for a non
1948 memory source, range holds the same value as size.
1949
1950 For instance, for an array char a[], (short) a[0] | (short) a[3] would have
1951 a size of 2 but a range of 4 while (short) a[0] | ((short) a[0] << 1) would
1952 still have a size of 2 but this time a range of 1. */
1953
1954 struct symbolic_number {
1955 uint64_t n;
1956 tree type;
1957 tree base_addr;
1958 tree offset;
1959 HOST_WIDE_INT bytepos;
1960 tree src;
1961 tree alias_set;
1962 tree vuse;
1963 unsigned HOST_WIDE_INT range;
1964 };
1965
1966 #define BITS_PER_MARKER 8
1967 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
1968 #define MARKER_BYTE_UNKNOWN MARKER_MASK
1969 #define HEAD_MARKER(n, size) \
1970 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
1971
1972 /* The number which the find_bswap_or_nop_1 result should match in
1973 order to have a nop. The number is masked according to the size of
1974 the symbolic number before using it. */
1975 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
1976 (uint64_t)0x08070605 << 32 | 0x04030201)
1977
1978 /* The number which the find_bswap_or_nop_1 result should match in
1979 order to have a byte swap. The number is masked according to the
1980 size of the symbolic number before using it. */
1981 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
1982 (uint64_t)0x01020304 << 32 | 0x05060708)
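
/* Editorial worked example (not part of GCC): for a 4-byte symbolic
   number the masked comparison constants are

     CMPNOP  & 0xffffffff      == 0x04030201
     CMPXCHG >> ((8 - 4) * 8)  == 0x01020304

   init_symbolic_number assigns byte i (counting from the lsb, starting
   at 0) the marker i + 1, i.e. 0x04030201 for a uint32_t.  If
   simulating the expression leaves the markers in that order, the
   computation is a nop (a native-endian read); if it reverses them to
   0x01020304, it is a full byte swap.  */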
1983
1984 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
1985 number N. Return false if the requested operation is not permitted
1986 on a symbolic number. */
1987
1988 static inline bool
1989 do_shift_rotate (enum tree_code code,
1990 struct symbolic_number *n,
1991 int count)
1992 {
1993 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
1994 unsigned head_marker;
1995
1996 if (count % BITS_PER_UNIT != 0)
1997 return false;
1998 count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
1999
2000 /* Zero out the extra bits of N in order to avoid them being shifted
2001 into the significant bits. */
2002 if (size < 64 / BITS_PER_MARKER)
2003 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2004
2005 switch (code)
2006 {
2007 case LSHIFT_EXPR:
2008 n->n <<= count;
2009 break;
2010 case RSHIFT_EXPR:
2011 head_marker = HEAD_MARKER (n->n, size);
2012 n->n >>= count;
2013 /* Arithmetic shift of signed type: result is dependent on the value. */
2014 if (!TYPE_UNSIGNED (n->type) && head_marker)
2015 for (i = 0; i < count / BITS_PER_MARKER; i++)
2016 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
2017 << ((size - 1 - i) * BITS_PER_MARKER);
2018 break;
2019 case LROTATE_EXPR:
2020 n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
2021 break;
2022 case RROTATE_EXPR:
2023 n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
2024 break;
2025 default:
2026 return false;
2027 }
2028 /* Zero unused bits for size. */
2029 if (size < 64 / BITS_PER_MARKER)
2030 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2031 return true;
2032 }
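
/* Editorial worked example (not part of GCC): for a 4-byte unsigned
   source whose markers are still in the identity layout 0x04030201,
   an RSHIFT_EXPR by 8 bits becomes a shift by one marker position:

     0x04030201  ->  0x00040302

   so the low byte of the result is known to come from byte 1 of the
   source and the high byte is known to be zero.  For a signed type
   with a non-zero head marker, the vacated byte would instead become
   MARKER_BYTE_UNKNOWN (0xff), since an arithmetic right shift
   replicates the sign bit.  */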
2033
2034 /* Perform sanity checking for the symbolic number N and the gimple
2035 statement STMT. */
2036
2037 static inline bool
2038 verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
2039 {
2040 tree lhs_type;
2041
2042 lhs_type = gimple_expr_type (stmt);
2043
2044 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
2045 return false;
2046
2047 if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
2048 return false;
2049
2050 return true;
2051 }
2052
2053 /* Initialize the symbolic number N for the bswap pass from the base element
2054 SRC manipulated by the bitwise OR expression. */
2055
2056 static bool
2057 init_symbolic_number (struct symbolic_number *n, tree src)
2058 {
2059 int size;
2060
2061 if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
2062 return false;
2063
2064 n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
2065 n->src = src;
2066
2067 /* Set up the symbolic number N by setting each byte to a value between 1 and
2068 the byte size of SRC. The highest order byte is set to that byte size and
2069 the lowest order byte to 1. */
2070 n->type = TREE_TYPE (src);
2071 size = TYPE_PRECISION (n->type);
2072 if (size % BITS_PER_UNIT != 0)
2073 return false;
2074 size /= BITS_PER_UNIT;
2075 if (size > 64 / BITS_PER_MARKER)
2076 return false;
2077 n->range = size;
2078 n->n = CMPNOP;
2079
2080 if (size < 64 / BITS_PER_MARKER)
2081 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2082
2083 return true;
2084 }
2085
2086 /* Check if STMT might be a byte swap or a nop from a memory source and return
2087 the answer. If so, REF is that memory source and the base of the memory area
2088 accessed and the offset of the access from that base are recorded in N. */
2089
2090 bool
2091 find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
2092 {
2093 /* Leaf node is an array or component ref. Memorize its base and
2094 offset from base to compare to other such leaf nodes. */
2095 HOST_WIDE_INT bitsize, bitpos;
2096 machine_mode mode;
2097 int unsignedp, reversep, volatilep;
2098 tree offset, base_addr;
2099
2100 /* Not prepared to handle PDP endian. */
2101 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
2102 return false;
2103
2104 if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
2105 return false;
2106
2107 base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
2108 &unsignedp, &reversep, &volatilep);
2109
2110 if (TREE_CODE (base_addr) == MEM_REF)
2111 {
2112 offset_int bit_offset = 0;
2113 tree off = TREE_OPERAND (base_addr, 1);
2114
2115 if (!integer_zerop (off))
2116 {
2117 offset_int boff, coff = mem_ref_offset (base_addr);
2118 boff = coff << LOG2_BITS_PER_UNIT;
2119 bit_offset += boff;
2120 }
2121
2122 base_addr = TREE_OPERAND (base_addr, 0);
2123
2124 /* Avoid returning a negative bitpos as this may wreak havoc later. */
2125 if (wi::neg_p (bit_offset))
2126 {
2127 offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
2128 offset_int tem = bit_offset.and_not (mask);
2129 /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
2130 Subtract it from BIT_OFFSET and add it (scaled) to OFFSET. */
2131 bit_offset -= tem;
2132 tem >>= LOG2_BITS_PER_UNIT;
2133 if (offset)
2134 offset = size_binop (PLUS_EXPR, offset,
2135 wide_int_to_tree (sizetype, tem));
2136 else
2137 offset = wide_int_to_tree (sizetype, tem);
2138 }
2139
2140 bitpos += bit_offset.to_shwi ();
2141 }
2142
2143 if (bitpos % BITS_PER_UNIT)
2144 return false;
2145 if (bitsize % BITS_PER_UNIT)
2146 return false;
2147 if (reversep)
2148 return false;
2149
2150 if (!init_symbolic_number (n, ref))
2151 return false;
2152 n->base_addr = base_addr;
2153 n->offset = offset;
2154 n->bytepos = bitpos / BITS_PER_UNIT;
2155 n->alias_set = reference_alias_ptr_type (ref);
2156 n->vuse = gimple_vuse (stmt);
2157 return true;
2158 }
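
/* Editorial illustration (not part of GCC; variable names are made
   up): for a statement such as

     unsigned char a[4];
     ...
     x_1 = a[3];

   the function above records base_addr = a, offset = NULL_TREE and
   bytepos = 3, and initializes the symbolic number from the single
   byte loaded, so n->n = 0x01 and n->range = 1.  Several such loads
   from the same base can later be combined by
   perform_symbolic_merge.  */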
2159
2160 /* Compute the symbolic number N representing the result of a bitwise OR on
2161 the two symbolic numbers N1 and N2 whose source statements are respectively
2162 SOURCE_STMT1 and SOURCE_STMT2. */
2163
2164 static gimple *
2165 perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
2166 gimple *source_stmt2, struct symbolic_number *n2,
2167 struct symbolic_number *n)
2168 {
2169 int i, size;
2170 uint64_t mask;
2171 gimple *source_stmt;
2172 struct symbolic_number *n_start;
2173
2174 tree rhs1 = gimple_assign_rhs1 (source_stmt1);
2175 if (TREE_CODE (rhs1) == BIT_FIELD_REF
2176 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
2177 rhs1 = TREE_OPERAND (rhs1, 0);
2178 tree rhs2 = gimple_assign_rhs1 (source_stmt2);
2179 if (TREE_CODE (rhs2) == BIT_FIELD_REF
2180 && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
2181 rhs2 = TREE_OPERAND (rhs2, 0);
2182
2183 /* The sources are different: cancel the bswap if they are not memory
2184 locations with the same base (array, structure, ...). */
2185 if (rhs1 != rhs2)
2186 {
2187 uint64_t inc;
2188 HOST_WIDE_INT start_sub, end_sub, end1, end2, end;
2189 struct symbolic_number *toinc_n_ptr, *n_end;
2190 basic_block bb1, bb2;
2191
2192 if (!n1->base_addr || !n2->base_addr
2193 || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
2194 return NULL;
2195
2196 if (!n1->offset != !n2->offset
2197 || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
2198 return NULL;
2199
2200 if (n1->bytepos < n2->bytepos)
2201 {
2202 n_start = n1;
2203 start_sub = n2->bytepos - n1->bytepos;
2204 }
2205 else
2206 {
2207 n_start = n2;
2208 start_sub = n1->bytepos - n2->bytepos;
2209 }
2210
2211 bb1 = gimple_bb (source_stmt1);
2212 bb2 = gimple_bb (source_stmt2);
2213 if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
2214 source_stmt = source_stmt1;
2215 else
2216 source_stmt = source_stmt2;
2217
2218 /* Find the highest address at which a load is performed and
2219 compute related info. */
2220 end1 = n1->bytepos + (n1->range - 1);
2221 end2 = n2->bytepos + (n2->range - 1);
2222 if (end1 < end2)
2223 {
2224 end = end2;
2225 end_sub = end2 - end1;
2226 }
2227 else
2228 {
2229 end = end1;
2230 end_sub = end1 - end2;
2231 }
2232 n_end = (end2 > end1) ? n2 : n1;
2233
2234 /* Find the symbolic number whose lsb is the most significant. */
2235 if (BYTES_BIG_ENDIAN)
2236 toinc_n_ptr = (n_end == n1) ? n2 : n1;
2237 else
2238 toinc_n_ptr = (n_start == n1) ? n2 : n1;
2239
2240 n->range = end - n_start->bytepos + 1;
2241
2242 /* Check that the range of memory covered can be represented by
2243 a symbolic number. */
2244 if (n->range > 64 / BITS_PER_MARKER)
2245 return NULL;
2246
2247 /* Reinterpret the byte marks of the symbolic number holding the value of
2248 greater weight according to the target endianness. */
2249 inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
2250 size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
2251 for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
2252 {
2253 unsigned marker
2254 = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
2255 if (marker && marker != MARKER_BYTE_UNKNOWN)
2256 toinc_n_ptr->n += inc;
2257 }
2258 }
2259 else
2260 {
2261 n->range = n1->range;
2262 n_start = n1;
2263 source_stmt = source_stmt1;
2264 }
2265
2266 if (!n1->alias_set
2267 || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
2268 n->alias_set = n1->alias_set;
2269 else
2270 n->alias_set = ptr_type_node;
2271 n->vuse = n_start->vuse;
2272 n->base_addr = n_start->base_addr;
2273 n->offset = n_start->offset;
2274 n->src = n_start->src;
2275 n->bytepos = n_start->bytepos;
2276 n->type = n_start->type;
2277 size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2278
2279 for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
2280 {
2281 uint64_t masked1, masked2;
2282
2283 masked1 = n1->n & mask;
2284 masked2 = n2->n & mask;
2285 if (masked1 && masked2 && masked1 != masked2)
2286 return NULL;
2287 }
2288 n->n = n1->n | n2->n;
2289
2290 return source_stmt;
2291 }
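
/* Editorial worked example (not part of GCC): on a little-endian
   target, for

     unsigned char a[2];
     ...
     x = (unsigned short) a[0] | ((unsigned short) a[1] << 8);

   the two OR operands carry the symbolic numbers 0x0001 (bytepos 0)
   and 0x0100 (bytepos 1, after the shift).  The merge increments the
   non-zero markers of the higher-addressed load by start_sub = 1,
   turning 0x0100 into 0x0200, and ORs the operands into 0x0201 with
   range 2 -- exactly CMPNOP masked to two bytes, i.e. a plain
   native-endian 16-bit read.  */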
2292
2293 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
2294 the operation given by the rhs of STMT on the result. If the operation
2295 could be executed successfully, the function returns a gimple stmt whose
2296 rhs's first tree is the expression of the source operand, and NULL
2297 otherwise. */
2298
2299 static gimple *
2300 find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
2301 {
2302 enum tree_code code;
2303 tree rhs1, rhs2 = NULL;
2304 gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
2305 enum gimple_rhs_class rhs_class;
2306
2307 if (!limit || !is_gimple_assign (stmt))
2308 return NULL;
2309
2310 rhs1 = gimple_assign_rhs1 (stmt);
2311
2312 if (find_bswap_or_nop_load (stmt, rhs1, n))
2313 return stmt;
2314
2315 /* Handle BIT_FIELD_REF. */
2316 if (TREE_CODE (rhs1) == BIT_FIELD_REF
2317 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
2318 {
2319 unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
2320 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
2321 if (bitpos % BITS_PER_UNIT == 0
2322 && bitsize % BITS_PER_UNIT == 0
2323 && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
2324 {
2325 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
2326 if (BYTES_BIG_ENDIAN)
2327 bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;
2328
2329 /* Shift. */
2330 if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
2331 return NULL;
2332
2333 /* Mask. */
2334 uint64_t mask = 0;
2335 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
2336 for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
2337 i++, tmp <<= BITS_PER_UNIT)
2338 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
2339 n->n &= mask;
2340
2341 /* Convert. */
2342 n->type = TREE_TYPE (rhs1);
2343 if (!n->base_addr)
2344 n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2345
2346 return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
2347 }
2348
2349 return NULL;
2350 }
2351
2352 if (TREE_CODE (rhs1) != SSA_NAME)
2353 return NULL;
2354
2355 code = gimple_assign_rhs_code (stmt);
2356 rhs_class = gimple_assign_rhs_class (stmt);
2357 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2358
2359 if (rhs_class == GIMPLE_BINARY_RHS)
2360 rhs2 = gimple_assign_rhs2 (stmt);
2361
2362 /* Handle unary rhs and binary rhs with integer constants as second
2363 operand. */
2364
2365 if (rhs_class == GIMPLE_UNARY_RHS
2366 || (rhs_class == GIMPLE_BINARY_RHS
2367 && TREE_CODE (rhs2) == INTEGER_CST))
2368 {
2369 if (code != BIT_AND_EXPR
2370 && code != LSHIFT_EXPR
2371 && code != RSHIFT_EXPR
2372 && code != LROTATE_EXPR
2373 && code != RROTATE_EXPR
2374 && !CONVERT_EXPR_CODE_P (code))
2375 return NULL;
2376
2377 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
2378
2379 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
2380 we have to initialize the symbolic number. */
2381 if (!source_stmt1)
2382 {
2383 if (gimple_assign_load_p (stmt)
2384 || !init_symbolic_number (n, rhs1))
2385 return NULL;
2386 source_stmt1 = stmt;
2387 }
2388
2389 switch (code)
2390 {
2391 case BIT_AND_EXPR:
2392 {
2393 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2394 uint64_t val = int_cst_value (rhs2), mask = 0;
2395 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
2396
2397 /* Only constants masking full bytes are allowed. */
2398 for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
2399 if ((val & tmp) != 0 && (val & tmp) != tmp)
2400 return NULL;
2401 else if (val & tmp)
2402 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
2403
2404 n->n &= mask;
2405 }
2406 break;
2407 case LSHIFT_EXPR:
2408 case RSHIFT_EXPR:
2409 case LROTATE_EXPR:
2410 case RROTATE_EXPR:
2411 if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
2412 return NULL;
2413 break;
2414 CASE_CONVERT:
2415 {
2416 int i, type_size, old_type_size;
2417 tree type;
2418
2419 type = gimple_expr_type (stmt);
2420 type_size = TYPE_PRECISION (type);
2421 if (type_size % BITS_PER_UNIT != 0)
2422 return NULL;
2423 type_size /= BITS_PER_UNIT;
2424 if (type_size > 64 / BITS_PER_MARKER)
2425 return NULL;
2426
2427 /* Sign extension: result is dependent on the value. */
2428 old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2429 if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
2430 && HEAD_MARKER (n->n, old_type_size))
2431 for (i = 0; i < type_size - old_type_size; i++)
2432 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
2433 << ((type_size - 1 - i) * BITS_PER_MARKER);
2434
2435 if (type_size < 64 / BITS_PER_MARKER)
2436 {
2437 /* If STMT casts to a smaller type mask out the bits not
2438 belonging to the target type. */
2439 n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
2440 }
2441 n->type = type;
2442 if (!n->base_addr)
2443 n->range = type_size;
2444 }
2445 break;
2446 default:
2447 return NULL;
2448 };
2449 return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
2450 }
2451
2452 /* Handle binary rhs. */
2453
2454 if (rhs_class == GIMPLE_BINARY_RHS)
2455 {
2456 struct symbolic_number n1, n2;
2457 gimple *source_stmt, *source_stmt2;
2458
2459 if (code != BIT_IOR_EXPR)
2460 return NULL;
2461
2462 if (TREE_CODE (rhs2) != SSA_NAME)
2463 return NULL;
2464
2465 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2466
2467 switch (code)
2468 {
2469 case BIT_IOR_EXPR:
2470 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
2471
2472 if (!source_stmt1)
2473 return NULL;
2474
2475 source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
2476
2477 if (!source_stmt2)
2478 return NULL;
2479
2480 if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
2481 return NULL;
2482
2483 if (!n1.vuse != !n2.vuse
2484 || (n1.vuse && !operand_equal_p (n1.vuse, n2.vuse, 0)))
2485 return NULL;
2486
2487 source_stmt
2488 = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
2489
2490 if (!source_stmt)
2491 return NULL;
2492
2493 if (!verify_symbolic_number_p (n, stmt))
2494 return NULL;
2495
2496 break;
2497 default:
2498 return NULL;
2499 }
2500 return source_stmt;
2501 }
2502 return NULL;
2503 }
2504
2505 /* Check if STMT completes a bswap implementation or a read in a given
2506 endianness consisting of ORs, SHIFTs and ANDs, and set *BSWAP
2507 accordingly. It also sets N to represent the kind of operations
2508 performed: the size of the resulting expression and whether it works
2509 on a memory source, and if so its alias set and vuse. Finally, the
2510 function returns a stmt whose rhs's first tree is the source
2511 expression. */
2512
2513 static gimple *
2514 find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
2515 {
2516 unsigned rsize;
2517 uint64_t tmpn, mask;
2518 /* The number which the find_bswap_or_nop_1 result should match in order
2519 to have a full byte swap. The number is shifted to the right
2520 according to the size of the symbolic number before using it. */
2521 uint64_t cmpxchg = CMPXCHG;
2522 uint64_t cmpnop = CMPNOP;
2523
2524 gimple *ins_stmt;
2525 int limit;
2526
2527 /* The last parameter determines the search depth limit. It usually
2528 correlates directly to the number n of bytes to be touched. We
2529 increase that number by log2(n) + 1 here in order to also
2530 cover signed -> unsigned conversions of the src operand, as can be seen
2531 in libgcc, and for the initial shift/and operation of the src operand. */
2532 limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
2533 limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
2534 ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);
2535
2536 if (!ins_stmt)
2537 return NULL;
2538
2539 /* Find real size of result (highest non-zero byte). */
2540 if (n->base_addr)
2541 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
2542 else
2543 rsize = n->range;
2544
2545 /* Zero out the bits corresponding to untouched bytes in original gimple
2546 expression. */
2547 if (n->range < (int) sizeof (int64_t))
2548 {
2549 mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
2550 cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
2551 cmpnop &= mask;
2552 }
2553
2554 /* Zero out the bits corresponding to unused bytes in the result of the
2555 gimple expression. */
2556 if (rsize < n->range)
2557 {
2558 if (BYTES_BIG_ENDIAN)
2559 {
2560 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
2561 cmpxchg &= mask;
2562 cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
2563 }
2564 else
2565 {
2566 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
2567 cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
2568 cmpnop &= mask;
2569 }
2570 n->range = rsize;
2571 }
2572
2573 /* A complete byte swap should make the symbolic number start with
2574 the largest digit in the highest order byte. An unchanged symbolic
2575 number indicates a read with the same endianness as the target. */
2576 if (n->n == cmpnop)
2577 *bswap = false;
2578 else if (n->n == cmpxchg)
2579 *bswap = true;
2580 else
2581 return NULL;
2582
2583 /* Useless bit manipulation performed by code: a nop on a non-memory source. */
2584 if (!n->base_addr && n->n == cmpnop)
2585 return NULL;
2586
2587 n->range *= BITS_PER_UNIT;
2588 return ins_stmt;
2589 }
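
/* Editorial illustration (not part of GCC; the function name is made
   up): the open-coded byte swap below is the kind of pattern
   find_bswap_or_nop reports with *bswap = true and n->range = 32, so
   that the bswap pass can replace it by __builtin_bswap32:

     #include <stdint.h>

     uint32_t
     swap32 (uint32_t x)
     {
       return ((x & 0x000000ffu) << 24)
              | ((x & 0x0000ff00u) << 8)
              | ((x & 0x00ff0000u) >> 8)
              | ((x & 0xff000000u) >> 24);
     }
*/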
2590
2591 namespace {
2592
2593 const pass_data pass_data_optimize_bswap =
2594 {
2595 GIMPLE_PASS, /* type */
2596 "bswap", /* name */
2597 OPTGROUP_NONE, /* optinfo_flags */
2598 TV_NONE, /* tv_id */
2599 PROP_ssa, /* properties_required */
2600 0, /* properties_provided */
2601 0, /* properties_destroyed */
2602 0, /* todo_flags_start */
2603 0, /* todo_flags_finish */
2604 };
2605
2606 class pass_optimize_bswap : public gimple_opt_pass
2607 {
2608 public:
2609 pass_optimize_bswap (gcc::context *ctxt)
2610 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
2611 {}
2612
2613 /* opt_pass methods: */
2614 virtual bool gate (function *)
2615 {
2616 return flag_expensive_optimizations && optimize;
2617 }
2618
2619 virtual unsigned int execute (function *);
2620
2621 }; // class pass_optimize_bswap
2622
2623 /* Perform the bswap optimization: replace the expression computed in the rhs
2624 of CUR_STMT by an equivalent bswap, load or load + bswap expression.
2625 Which of these alternatives replaces the rhs is given by N->base_addr (non
2626 null if a load is needed) and BSWAP. The type, VUSE and alias set of the
2627 load to perform are also given in N, while the bswap builtin to invoke is
2628 given in FNDECL. Finally, if a load is involved, INS_STMT refers to one of
2629 the load statements used to construct the rhs in CUR_STMT and N->range gives
2630 the size of the rhs expression for maintaining some statistics.
2631
2632 Note that if the replacement involves a load, CUR_STMT is moved just after
2633 INS_STMT to do the load with the same VUSE, which can lead to CUR_STMT
2634 changing basic block. */
2635
2636 static bool
2637 bswap_replace (gimple *cur_stmt, gimple *ins_stmt, tree fndecl,
2638 tree bswap_type, tree load_type, struct symbolic_number *n,
2639 bool bswap)
2640 {
2641 gimple_stmt_iterator gsi;
2642 tree src, tmp, tgt;
2643 gimple *bswap_stmt;
2644
2645 gsi = gsi_for_stmt (cur_stmt);
2646 src = n->src;
2647 tgt = gimple_assign_lhs (cur_stmt);
2648
2649 /* Need to load the value from memory first. */
2650 if (n->base_addr)
2651 {
2652 gimple_stmt_iterator gsi_ins = gsi_for_stmt (ins_stmt);
2653 tree addr_expr, addr_tmp, val_expr, val_tmp;
2654 tree load_offset_ptr, aligned_load_type;
2655 gimple *addr_stmt, *load_stmt;
2656 unsigned align;
2657 HOST_WIDE_INT load_offset = 0;
2658 basic_block ins_bb, cur_bb;
2659
2660 ins_bb = gimple_bb (ins_stmt);
2661 cur_bb = gimple_bb (cur_stmt);
2662 if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
2663 return false;
2664
2665 align = get_object_alignment (src);
2666
2667 /* Move cur_stmt just before one of the load of the original
2668 to ensure it has the same VUSE. See PR61517 for what could
2669 go wrong. */
2670 if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
2671 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
2672 gsi_move_before (&gsi, &gsi_ins);
2673 gsi = gsi_for_stmt (cur_stmt);
2674
2675 /* Compute address to load from and cast according to the size
2676 of the load. */
2677 addr_expr = build_fold_addr_expr (unshare_expr (src));
2678 if (is_gimple_mem_ref_addr (addr_expr))
2679 addr_tmp = addr_expr;
2680 else
2681 {
2682 addr_tmp = make_temp_ssa_name (TREE_TYPE (addr_expr), NULL,
2683 "load_src");
2684 addr_stmt = gimple_build_assign (addr_tmp, addr_expr);
2685 gsi_insert_before (&gsi, addr_stmt, GSI_SAME_STMT);
2686 }
2687
2688 /* Perform the load. */
2689 aligned_load_type = load_type;
2690 if (align < TYPE_ALIGN (load_type))
2691 aligned_load_type = build_aligned_type (load_type, align);
2692 load_offset_ptr = build_int_cst (n->alias_set, load_offset);
2693 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
2694 load_offset_ptr);
2695
2696 if (!bswap)
2697 {
2698 if (n->range == 16)
2699 nop_stats.found_16bit++;
2700 else if (n->range == 32)
2701 nop_stats.found_32bit++;
2702 else
2703 {
2704 gcc_assert (n->range == 64);
2705 nop_stats.found_64bit++;
2706 }
2707
2708 /* Convert the result of load if necessary. */
2709 if (!useless_type_conversion_p (TREE_TYPE (tgt), load_type))
2710 {
2711 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
2712 "load_dst");
2713 load_stmt = gimple_build_assign (val_tmp, val_expr);
2714 gimple_set_vuse (load_stmt, n->vuse);
2715 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2716 gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
2717 }
2718 else
2719 {
2720 gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
2721 gimple_set_vuse (cur_stmt, n->vuse);
2722 }
2723 update_stmt (cur_stmt);
2724
2725 if (dump_file)
2726 {
2727 fprintf (dump_file,
2728 "%d bit load in target endianness found at: ",
2729 (int) n->range);
2730 print_gimple_stmt (dump_file, cur_stmt, 0, 0);
2731 }
2732 return true;
2733 }
2734 else
2735 {
2736 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
2737 load_stmt = gimple_build_assign (val_tmp, val_expr);
2738 gimple_set_vuse (load_stmt, n->vuse);
2739 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2740 }
2741 src = val_tmp;
2742 }
2743 else if (TREE_CODE (src) == BIT_FIELD_REF)
2744 src = TREE_OPERAND (src, 0);
2745
2746 if (n->range == 16)
2747 bswap_stats.found_16bit++;
2748 else if (n->range == 32)
2749 bswap_stats.found_32bit++;
2750 else
2751 {
2752 gcc_assert (n->range == 64);
2753 bswap_stats.found_64bit++;
2754 }
2755
2756 tmp = src;
2757
2758 /* Convert the src expression if necessary. */
2759 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
2760 {
2761 gimple *convert_stmt;
2762
2763 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
2764 convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
2765 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
2766 }
2767
2768 /* The canonical form for a 16-bit bswap is a rotate expression. Only 16-bit
2769 values are handled this way, as rotation of 2N-bit values by N bits is
2770 generally not equivalent to a bswap. Consider for instance 0x01020304 r>> 16,
2771 which gives 0x03040102, while the bswap of that value is 0x04030201. */
2772 if (bswap && n->range == 16)
2773 {
2774 tree count = build_int_cst (NULL, BITS_PER_UNIT);
2775 src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
2776 bswap_stmt = gimple_build_assign (NULL, src);
2777 }
2778 else
2779 bswap_stmt = gimple_build_call (fndecl, 1, tmp);
2780
2781 tmp = tgt;
2782
2783 /* Convert the result if necessary. */
2784 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
2785 {
2786 gimple *convert_stmt;
2787
2788 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
2789 convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
2790 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
2791 }
2792
2793 gimple_set_lhs (bswap_stmt, tmp);
2794
2795 if (dump_file)
2796 {
2797 fprintf (dump_file, "%d bit bswap implementation found at: ",
2798 (int) n->range);
2799 print_gimple_stmt (dump_file, cur_stmt, 0, 0);
2800 }
2801
2802 gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
2803 gsi_remove (&gsi, true);
2804 return true;
2805 }
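
/* Editorial illustration (not part of GCC; the function name is made
   up): for a value assembled byte by byte from memory, e.g.

     #include <stdint.h>

     uint32_t
     read_le32 (const unsigned char *p)
     {
       return (uint32_t) p[0]
              | ((uint32_t) p[1] << 8)
              | ((uint32_t) p[2] << 16)
              | ((uint32_t) p[3] << 24);
     }

   bswap_replace emits a single (possibly unaligned) 32-bit load on a
   little-endian target, and a 32-bit load followed by a
   __builtin_bswap32 call on a big-endian one.  */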
2806
2807 /* Find manual byte swap implementations as well as loads in a given
2808 endianness. Byte swaps are turned into a bswap builtin invocation,
2809 while endian loads are converted to a bswap builtin invocation or a
2810 simple load according to the target endianness. */
2811
2812 unsigned int
2813 pass_optimize_bswap::execute (function *fun)
2814 {
2815 basic_block bb;
2816 bool bswap32_p, bswap64_p;
2817 bool changed = false;
2818 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
2819
2820 if (BITS_PER_UNIT != 8)
2821 return 0;
2822
2823 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2824 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
2825 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2826 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
2827 || (bswap32_p && word_mode == SImode)));
2828
2829 /* Determine the argument type of the builtins. The code later on
2830 assumes that the return and argument type are the same. */
2831 if (bswap32_p)
2832 {
2833 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2834 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2835 }
2836
2837 if (bswap64_p)
2838 {
2839 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2840 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2841 }
2842
2843 memset (&nop_stats, 0, sizeof (nop_stats));
2844 memset (&bswap_stats, 0, sizeof (bswap_stats));
2845 calculate_dominance_info (CDI_DOMINATORS);
2846
2847 FOR_EACH_BB_FN (bb, fun)
2848 {
2849 gimple_stmt_iterator gsi;
2850
2851 /* We do a reverse scan for bswap patterns to make sure we get the
2852 widest match. As bswap pattern matching doesn't handle previously
2853 inserted smaller bswap replacements as sub-patterns, the wider
2854 variant wouldn't be detected. */
2855 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
2856 {
2857 gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
2858 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
2859 enum tree_code code;
2860 struct symbolic_number n;
2861 bool bswap;
2862
2863 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
2864 might be moved to a different basic block by bswap_replace and gsi
2865 must not point to it in that case. Doing the gsi_prev here makes
2866 sure that gsi points to the statement previous to
2867 cur_stmt while still making sure that all statements in this
2868 basic block are considered. */
2869 gsi_prev (&gsi);
2870
2871 if (!is_gimple_assign (cur_stmt))
2872 continue;
2873
2874 code = gimple_assign_rhs_code (cur_stmt);
2875 switch (code)
2876 {
2877 case LROTATE_EXPR:
2878 case RROTATE_EXPR:
2879 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
2880 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
2881 % BITS_PER_UNIT)
2882 continue;
2883 /* Fall through. */
2884 case BIT_IOR_EXPR:
2885 break;
2886 default:
2887 continue;
2888 }
2889
2890 ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
2891
2892 if (!ins_stmt)
2893 continue;
2894
2895 switch (n.range)
2896 {
2897 case 16:
2898 /* Already in canonical form, nothing to do. */
2899 if (code == LROTATE_EXPR || code == RROTATE_EXPR)
2900 continue;
2901 load_type = bswap_type = uint16_type_node;
2902 break;
2903 case 32:
2904 load_type = uint32_type_node;
2905 if (bswap32_p)
2906 {
2907 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2908 bswap_type = bswap32_type;
2909 }
2910 break;
2911 case 64:
2912 load_type = uint64_type_node;
2913 if (bswap64_p)
2914 {
2915 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2916 bswap_type = bswap64_type;
2917 }
2918 break;
2919 default:
2920 continue;
2921 }
2922
2923 if (bswap && !fndecl && n.range != 16)
2924 continue;
2925
2926 if (bswap_replace (cur_stmt, ins_stmt, fndecl, bswap_type, load_type,
2927 &n, bswap))
2928 changed = true;
2929 }
2930 }
2931
2932 statistics_counter_event (fun, "16-bit nop implementations found",
2933 nop_stats.found_16bit);
2934 statistics_counter_event (fun, "32-bit nop implementations found",
2935 nop_stats.found_32bit);
2936 statistics_counter_event (fun, "64-bit nop implementations found",
2937 nop_stats.found_64bit);
2938 statistics_counter_event (fun, "16-bit bswap implementations found",
2939 bswap_stats.found_16bit);
2940 statistics_counter_event (fun, "32-bit bswap implementations found",
2941 bswap_stats.found_32bit);
2942 statistics_counter_event (fun, "64-bit bswap implementations found",
2943 bswap_stats.found_64bit);
2944
2945 return (changed ? TODO_update_ssa : 0);
2946 }
2947
2948 } // anon namespace
2949
2950 gimple_opt_pass *
2951 make_pass_optimize_bswap (gcc::context *ctxt)
2952 {
2953 return new pass_optimize_bswap (ctxt);
2954 }
2955
2956 /* Return true if stmt is a type conversion operation that can be stripped
2957 when used in a widening multiply operation. */
2958 static bool
2959 widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
2960 {
2961 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
2962
2963 if (TREE_CODE (result_type) == INTEGER_TYPE)
2964 {
2965 tree op_type;
2966 tree inner_op_type;
2967
2968 if (!CONVERT_EXPR_CODE_P (rhs_code))
2969 return false;
2970
2971 op_type = TREE_TYPE (gimple_assign_lhs (stmt));
2972
2973 /* If the type of OP has the same precision as the result, then
2974 we can strip this conversion. The multiply operation will be
2975 selected to create the correct extension as a by-product. */
2976 if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
2977 return true;
2978
2979 /* We can also strip a conversion if it preserves the signed-ness of
2980 the operation and doesn't narrow the range. */
2981 inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
2982
2983 /* If the inner-most type is unsigned, then we can strip any
2984 intermediate widening operation. If it's signed, then the
2985 intermediate widening operation must also be signed. */
2986 if ((TYPE_UNSIGNED (inner_op_type)
2987 || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
2988 && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
2989 return true;
2990
2991 return false;
2992 }
2993
2994 return rhs_code == FIXED_CONVERT_EXPR;
2995 }
2996
2997 /* Return true if RHS is a suitable operand for a widening multiplication,
2998 assuming a target type of TYPE.
2999 There are two cases:
3000
3001 - RHS makes some value at least twice as wide. Store that value
3002 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
3003
3004 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
3005 and store NULL in *TYPE_OUT. */
3006
3007 static bool
3008 is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
3009 tree *new_rhs_out)
3010 {
3011 gimple *stmt;
3012 tree type1, rhs1;
3013
3014 if (TREE_CODE (rhs) == SSA_NAME)
3015 {
3016 stmt = SSA_NAME_DEF_STMT (rhs);
3017 if (is_gimple_assign (stmt))
3018 {
3019 if (! widening_mult_conversion_strippable_p (type, stmt))
3020 rhs1 = rhs;
3021 else
3022 {
3023 rhs1 = gimple_assign_rhs1 (stmt);
3024
3025 if (TREE_CODE (rhs1) == INTEGER_CST)
3026 {
3027 *new_rhs_out = rhs1;
3028 *type_out = NULL;
3029 return true;
3030 }
3031 }
3032 }
3033 else
3034 rhs1 = rhs;
3035
3036 type1 = TREE_TYPE (rhs1);
3037
3038 if (TREE_CODE (type1) != TREE_CODE (type)
3039 || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
3040 return false;
3041
3042 *new_rhs_out = rhs1;
3043 *type_out = type1;
3044 return true;
3045 }
3046
3047 if (TREE_CODE (rhs) == INTEGER_CST)
3048 {
3049 *new_rhs_out = rhs;
3050 *type_out = NULL;
3051 return true;
3052 }
3053
3054 return false;
3055 }
3056
3057 /* Return true if STMT performs a widening multiplication, assuming the
3058 output type is TYPE. If so, store the unwidened types of the operands
3059 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
3060 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
3061 and *TYPE2_OUT would give the operands of the multiplication. */
3062
3063 static bool
3064 is_widening_mult_p (gimple *stmt,
3065 tree *type1_out, tree *rhs1_out,
3066 tree *type2_out, tree *rhs2_out)
3067 {
3068 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
3069
3070 if (TREE_CODE (type) != INTEGER_TYPE
3071 && TREE_CODE (type) != FIXED_POINT_TYPE)
3072 return false;
3073
3074 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
3075 rhs1_out))
3076 return false;
3077
3078 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
3079 rhs2_out))
3080 return false;
3081
3082 if (*type1_out == NULL)
3083 {
3084 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
3085 return false;
3086 *type1_out = *type2_out;
3087 }
3088
3089 if (*type2_out == NULL)
3090 {
3091 if (!int_fits_type_p (*rhs2_out, *type1_out))
3092 return false;
3093 *type2_out = *type1_out;
3094 }
3095
3096 /* Ensure that the larger of the two operands comes first. */
3097 if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
3098 {
3099 std::swap (*type1_out, *type2_out);
3100 std::swap (*rhs1_out, *rhs2_out);
3101 }
3102
3103 return true;
3104 }
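
/* Editorial illustration (not part of GCC; the function name is made
   up): the predicate above accepts a multiplication such as

     #include <stdint.h>

     int64_t
     widen_mul (int32_t a, int32_t b)
     {
       return (int64_t) a * (int64_t) b;
     }

   reporting the unwidened int32_t types and operands, so that
   convert_mult_to_widen can turn the MULT_EXPR into a
   WIDEN_MULT_EXPR.  A single INTEGER_CST operand is also accepted,
   e.g. (int64_t) a * 1000, as long as the constant fits in the type
   of the other operand.  */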
3105
3106 /* Process a single gimple statement STMT, which has a MULT_EXPR as
3107 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
3108 value is true iff we converted the statement. */
3109
3110 static bool
3111 convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
3112 {
3113 tree lhs, rhs1, rhs2, type, type1, type2;
3114 enum insn_code handler;
3115 machine_mode to_mode, from_mode, actual_mode;
3116 optab op;
3117 int actual_precision;
3118 location_t loc = gimple_location (stmt);
3119 bool from_unsigned1, from_unsigned2;
3120
3121 lhs = gimple_assign_lhs (stmt);
3122 type = TREE_TYPE (lhs);
3123 if (TREE_CODE (type) != INTEGER_TYPE)
3124 return false;
3125
3126 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
3127 return false;
3128
3129 to_mode = TYPE_MODE (type);
3130 from_mode = TYPE_MODE (type1);
3131 from_unsigned1 = TYPE_UNSIGNED (type1);
3132 from_unsigned2 = TYPE_UNSIGNED (type2);
3133
3134 if (from_unsigned1 && from_unsigned2)
3135 op = umul_widen_optab;
3136 else if (!from_unsigned1 && !from_unsigned2)
3137 op = smul_widen_optab;
3138 else
3139 op = usmul_widen_optab;
3140
3141 handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
3142 0, &actual_mode);
3143
3144 if (handler == CODE_FOR_nothing)
3145 {
3146 if (op != smul_widen_optab)
3147 {
3148 /* We can use a signed multiply with unsigned types as long as
3149 there is a wider mode to use, or it is the smaller of the two
3150 types that is unsigned. Note that type1 >= type2, always. */
3151 if ((TYPE_UNSIGNED (type1)
3152 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
3153 || (TYPE_UNSIGNED (type2)
3154 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
3155 {
3156 from_mode = GET_MODE_WIDER_MODE (from_mode);
3157 if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
3158 return false;
3159 }
3160
3161 op = smul_widen_optab;
3162 handler = find_widening_optab_handler_and_mode (op, to_mode,
3163 from_mode, 0,
3164 &actual_mode);
3165
3166 if (handler == CODE_FOR_nothing)
3167 return false;
3168
3169 from_unsigned1 = from_unsigned2 = false;
3170 }
3171 else
3172 return false;
3173 }
3174
3175 /* Ensure that the inputs to the handler are in the correct precision
3176 for the opcode. This will be the full mode size. */
3177 actual_precision = GET_MODE_PRECISION (actual_mode);
3178 if (2 * actual_precision > TYPE_PRECISION (type))
3179 return false;
3180 if (actual_precision != TYPE_PRECISION (type1)
3181 || from_unsigned1 != TYPE_UNSIGNED (type1))
3182 rhs1 = build_and_insert_cast (gsi, loc,
3183 build_nonstandard_integer_type
3184 (actual_precision, from_unsigned1), rhs1);
3185 if (actual_precision != TYPE_PRECISION (type2)
3186 || from_unsigned2 != TYPE_UNSIGNED (type2))
3187 rhs2 = build_and_insert_cast (gsi, loc,
3188 build_nonstandard_integer_type
3189 (actual_precision, from_unsigned2), rhs2);
3190
3191 /* Handle constants. */
3192 if (TREE_CODE (rhs1) == INTEGER_CST)
3193 rhs1 = fold_convert (type1, rhs1);
3194 if (TREE_CODE (rhs2) == INTEGER_CST)
3195 rhs2 = fold_convert (type2, rhs2);
3196
3197 gimple_assign_set_rhs1 (stmt, rhs1);
3198 gimple_assign_set_rhs2 (stmt, rhs2);
3199 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
3200 update_stmt (stmt);
3201 widen_mul_stats.widen_mults_inserted++;
3202 return true;
3203 }
3204
3205 /* Process a single gimple statement STMT, which is found at the
3206 iterator GSI and has a either a PLUS_EXPR or a MINUS_EXPR as its
3207 rhs (given by CODE), and try to convert it into a
3208 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
3209 is true iff we converted the statement. */
3210
3211 static bool
3212 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
3213 enum tree_code code)
3214 {
3215 gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
3216 gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
3217 tree type, type1, type2, optype;
3218 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
3219 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
3220 optab this_optab;
3221 enum tree_code wmult_code;
3222 enum insn_code handler;
3223 machine_mode to_mode, from_mode, actual_mode;
3224 location_t loc = gimple_location (stmt);
3225 int actual_precision;
3226 bool from_unsigned1, from_unsigned2;
3227
3228 lhs = gimple_assign_lhs (stmt);
3229 type = TREE_TYPE (lhs);
3230 if (TREE_CODE (type) != INTEGER_TYPE
3231 && TREE_CODE (type) != FIXED_POINT_TYPE)
3232 return false;
3233
3234 if (code == MINUS_EXPR)
3235 wmult_code = WIDEN_MULT_MINUS_EXPR;
3236 else
3237 wmult_code = WIDEN_MULT_PLUS_EXPR;
3238
3239 rhs1 = gimple_assign_rhs1 (stmt);
3240 rhs2 = gimple_assign_rhs2 (stmt);
3241
3242 if (TREE_CODE (rhs1) == SSA_NAME)
3243 {
3244 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
3245 if (is_gimple_assign (rhs1_stmt))
3246 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
3247 }
3248
3249 if (TREE_CODE (rhs2) == SSA_NAME)
3250 {
3251 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
3252 if (is_gimple_assign (rhs2_stmt))
3253 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
3254 }
3255
3256 /* Allow for one conversion statement between the multiply
3257 and addition/subtraction statement. If there is more than
3258 one conversion then we assume they would invalidate this
3259 transformation. If that's not the case then they should have
3260 been folded before now. */
3261 if (CONVERT_EXPR_CODE_P (rhs1_code))
3262 {
3263 conv1_stmt = rhs1_stmt;
3264 rhs1 = gimple_assign_rhs1 (rhs1_stmt);
3265 if (TREE_CODE (rhs1) == SSA_NAME)
3266 {
3267 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
3268 if (is_gimple_assign (rhs1_stmt))
3269 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
3270 }
3271 else
3272 return false;
3273 }
3274 if (CONVERT_EXPR_CODE_P (rhs2_code))
3275 {
3276 conv2_stmt = rhs2_stmt;
3277 rhs2 = gimple_assign_rhs1 (rhs2_stmt);
3278 if (TREE_CODE (rhs2) == SSA_NAME)
3279 {
3280 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
3281 if (is_gimple_assign (rhs2_stmt))
3282 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
3283 }
3284 else
3285 return false;
3286 }
3287
3288 /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
3289 is_widening_mult_p, but we still need the rhs values it returns.
3290
3291 It might also appear that it would be sufficient to use the existing
3292 operands of the widening multiply, but that would limit the choice of
3293 multiply-and-accumulate instructions.
3294
3295 If the widened-multiplication result has more than one use, it is
3296 probably wiser not to do the conversion. */
3297 if (code == PLUS_EXPR
3298 && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
3299 {
3300 if (!has_single_use (rhs1)
3301 || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
3302 &type2, &mult_rhs2))
3303 return false;
3304 add_rhs = rhs2;
3305 conv_stmt = conv1_stmt;
3306 }
3307 else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
3308 {
3309 if (!has_single_use (rhs2)
3310 || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
3311 &type2, &mult_rhs2))
3312 return false;
3313 add_rhs = rhs1;
3314 conv_stmt = conv2_stmt;
3315 }
3316 else
3317 return false;
3318
3319 to_mode = TYPE_MODE (type);
3320 from_mode = TYPE_MODE (type1);
3321 from_unsigned1 = TYPE_UNSIGNED (type1);
3322 from_unsigned2 = TYPE_UNSIGNED (type2);
3323 optype = type1;
3324
3325 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
3326 if (from_unsigned1 != from_unsigned2)
3327 {
3328 if (!INTEGRAL_TYPE_P (type))
3329 return false;
3330 /* We can use a signed multiply with unsigned types as long as
3331 there is a wider mode to use, or it is the smaller of the two
3332 types that is unsigned. Note that type1 >= type2, always. */
3333 if ((from_unsigned1
3334 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
3335 || (from_unsigned2
3336 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
3337 {
3338 from_mode = GET_MODE_WIDER_MODE (from_mode);
3339 if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
3340 return false;
3341 }
3342
3343 from_unsigned1 = from_unsigned2 = false;
3344 optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
3345 false);
3346 }
3347
3348 /* If there was a conversion between the multiply and addition
3349 then we need to make sure it fits a multiply-and-accumulate.
3350 There should be a single mode change which does not change the
3351 value. */
3352 if (conv_stmt)
3353 {
3354 /* We use the original, unmodified data types for this. */
3355 tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
3356 tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
3357 int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
3358 bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
3359
3360 if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
3361 {
3362 /* Conversion is a truncate. */
3363 if (TYPE_PRECISION (to_type) < data_size)
3364 return false;
3365 }
3366 else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
3367 {
3368 /* Conversion is an extend. Check it's the right sort. */
3369 if (TYPE_UNSIGNED (from_type) != is_unsigned
3370 && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
3371 return false;
3372 }
3373 /* else convert is a no-op for our purposes. */
3374 }
3375
3376 /* Verify that the machine can perform a widening multiply
3377 accumulate in this mode/signedness combination, otherwise
3378 this transformation is likely to pessimize code. */
3379 this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
3380 handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
3381 from_mode, 0, &actual_mode);
3382
3383 if (handler == CODE_FOR_nothing)
3384 return false;
3385
3386 /* Ensure that the inputs to the handler are in the correct precision
3387 for the opcode. This will be the full mode size. */
3388 actual_precision = GET_MODE_PRECISION (actual_mode);
3389 if (actual_precision != TYPE_PRECISION (type1)
3390 || from_unsigned1 != TYPE_UNSIGNED (type1))
3391 mult_rhs1 = build_and_insert_cast (gsi, loc,
3392 build_nonstandard_integer_type
3393 (actual_precision, from_unsigned1),
3394 mult_rhs1);
3395 if (actual_precision != TYPE_PRECISION (type2)
3396 || from_unsigned2 != TYPE_UNSIGNED (type2))
3397 mult_rhs2 = build_and_insert_cast (gsi, loc,
3398 build_nonstandard_integer_type
3399 (actual_precision, from_unsigned2),
3400 mult_rhs2);
3401
3402 if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
3403 add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
3404
3405 /* Handle constants. */
3406 if (TREE_CODE (mult_rhs1) == INTEGER_CST)
3407 mult_rhs1 = fold_convert (type1, mult_rhs1);
3408 if (TREE_CODE (mult_rhs2) == INTEGER_CST)
3409 mult_rhs2 = fold_convert (type2, mult_rhs2);
3410
3411 gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
3412 add_rhs);
3413 update_stmt (gsi_stmt (*gsi));
3414 widen_mul_stats.maccs_inserted++;
3415 return true;
3416 }
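
/* Editorial illustration (not part of GCC; the function name is made
   up): on a target providing a widening multiply-and-accumulate
   pattern, the accumulation in

     #include <stdint.h>

     int64_t
     dot_step (int64_t acc, int32_t a, int32_t b)
     {
       return acc + (int64_t) a * (int64_t) b;
     }

   is rewritten by the function above from a widening multiply
   followed by an addition into a single WIDEN_MULT_PLUS_EXPR
   (a, b, acc), provided the multiply result has a single use.  */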
3417
3418 /* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
3419 with uses in additions and subtractions to form fused multiply-add
3420 operations. Returns true if successful and MUL_STMT should be removed. */
3421
3422 static bool
3423 convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2)
3424 {
3425 tree mul_result = gimple_get_lhs (mul_stmt);
3426 tree type = TREE_TYPE (mul_result);
3427 gimple *use_stmt, *neguse_stmt;
3428 gassign *fma_stmt;
3429 use_operand_p use_p;
3430 imm_use_iterator imm_iter;
3431
3432 if (FLOAT_TYPE_P (type)
3433 && flag_fp_contract_mode == FP_CONTRACT_OFF)
3434 return false;
3435
3436 /* We don't want to do bitfield reduction ops. */
3437 if (INTEGRAL_TYPE_P (type)
3438 && (TYPE_PRECISION (type)
3439 != GET_MODE_PRECISION (TYPE_MODE (type))))
3440 return false;
3441
3442 /* If the target doesn't support it, don't generate it. We assume that
3443 if fma isn't available then fms, fnma or fnms are not either. */
3444 if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
3445 return false;
3446
3447 /* If the multiplication has zero uses, it is kept around probably because
3448 of -fnon-call-exceptions. Don't optimize it away in that case,
3449 it is DCE's job. */
3450 if (has_zero_uses (mul_result))
3451 return false;
3452
3453 /* Make sure that the multiplication statement becomes dead after
3454 the transformation, i.e. that all uses are transformed to FMAs.
3455 This means we assume that an FMA operation has the same cost
3456 as an addition. */
3457 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
3458 {
3459 enum tree_code use_code;
3460 tree result = mul_result;
3461 bool negate_p = false;
3462
3463 use_stmt = USE_STMT (use_p);
3464
3465 if (is_gimple_debug (use_stmt))
3466 continue;
3467
3468 /* For now restrict this operation to single basic blocks. In theory
3469 we would want to support sinking the multiplication in
3470 m = a*b;
3471 if ()
3472 ma = m + c;
3473 else
3474 d = m;
3475 to form a fma in the then block and sink the multiplication to the
3476 else block. */
3477 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3478 return false;
3479
3480 if (!is_gimple_assign (use_stmt))
3481 return false;
3482
3483 use_code = gimple_assign_rhs_code (use_stmt);
3484
3485 /* A negate on the multiplication leads to FNMA. */
3486 if (use_code == NEGATE_EXPR)
3487 {
3488 ssa_op_iter iter;
3489 use_operand_p usep;
3490
3491 result = gimple_assign_lhs (use_stmt);
3492
3493 /* Make sure the negate statement becomes dead with this
3494 single transformation. */
3495 if (!single_imm_use (gimple_assign_lhs (use_stmt),
3496 &use_p, &neguse_stmt))
3497 return false;
3498
3499 /* Make sure the multiplication isn't also used on that stmt. */
3500 FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
3501 if (USE_FROM_PTR (usep) == mul_result)
3502 return false;
3503
3504 /* Re-validate. */
3505 use_stmt = neguse_stmt;
3506 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3507 return false;
3508 if (!is_gimple_assign (use_stmt))
3509 return false;
3510
3511 use_code = gimple_assign_rhs_code (use_stmt);
3512 negate_p = true;
3513 }
3514
3515 switch (use_code)
3516 {
3517 case MINUS_EXPR:
3518 if (gimple_assign_rhs2 (use_stmt) == result)
3519 negate_p = !negate_p;
3520 break;
3521 case PLUS_EXPR:
3522 break;
3523 default:
3524 /* FMA can only be formed from PLUS and MINUS. */
3525 return false;
3526 }
3527
3528 /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
3529 by a MULT_EXPR that we'll visit later, we might be able to
3530 get a more profitable match with fnma.
3531 OTOH, if we don't, a negate / fma pair likely has lower latency
3532 than a mult / subtract pair. */
3533 if (use_code == MINUS_EXPR && !negate_p
3534 && gimple_assign_rhs1 (use_stmt) == result
3535 && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
3536 && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
3537 {
3538 tree rhs2 = gimple_assign_rhs2 (use_stmt);
3539
3540 if (TREE_CODE (rhs2) == SSA_NAME)
3541 {
3542 gimple *stmt2 = SSA_NAME_DEF_STMT (rhs2);
3543 if (has_single_use (rhs2)
3544 && is_gimple_assign (stmt2)
3545 && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
3546 return false;
3547 }
3548 }
3549
3550 /* We can't handle a * b + a * b. */
3551 if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
3552 return false;
3553
3554 /* While it is possible to validate whether or not the exact form
3555 that we've recognized is available in the backend, the assumption
3556 is that the transformation is never a loss. For instance, suppose
3557 the target only has the plain FMA pattern available. Consider
3558 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
3559 is still two operations. Consider -(a*b)-c -> fma(-a,b,-c): we
3560 still have 3 operations, but in the FMA form the two NEGs are
3561 independent and could be run in parallel. */
3562 }
3563
3564 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
3565 {
3566 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
3567 enum tree_code use_code;
3568 tree addop, mulop1 = op1, result = mul_result;
3569 bool negate_p = false;
3570
3571 if (is_gimple_debug (use_stmt))
3572 continue;
3573
3574 use_code = gimple_assign_rhs_code (use_stmt);
3575 if (use_code == NEGATE_EXPR)
3576 {
3577 result = gimple_assign_lhs (use_stmt);
3578 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
3579 gsi_remove (&gsi, true);
3580 release_defs (use_stmt);
3581
3582 use_stmt = neguse_stmt;
3583 gsi = gsi_for_stmt (use_stmt);
3584 use_code = gimple_assign_rhs_code (use_stmt);
3585 negate_p = true;
3586 }
3587
3588 if (gimple_assign_rhs1 (use_stmt) == result)
3589 {
3590 addop = gimple_assign_rhs2 (use_stmt);
3591 /* a * b - c -> a * b + (-c) */
3592 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3593 addop = force_gimple_operand_gsi (&gsi,
3594 build1 (NEGATE_EXPR,
3595 type, addop),
3596 true, NULL_TREE, true,
3597 GSI_SAME_STMT);
3598 }
3599 else
3600 {
3601 addop = gimple_assign_rhs1 (use_stmt);
3602 /* a - b * c -> (-b) * c + a */
3603 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3604 negate_p = !negate_p;
3605 }
3606
3607 if (negate_p)
3608 mulop1 = force_gimple_operand_gsi (&gsi,
3609 build1 (NEGATE_EXPR,
3610 type, mulop1),
3611 true, NULL_TREE, true,
3612 GSI_SAME_STMT);
3613
3614 fma_stmt = gimple_build_assign (gimple_assign_lhs (use_stmt),
3615 FMA_EXPR, mulop1, op2, addop);
3616 gsi_replace (&gsi, fma_stmt, true);
3617 widen_mul_stats.fmas_inserted++;
3618 }
3619
3620 return true;
3621 }
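
/* Illustrative sketch of the transform above (SSA names are made up, not
   from this file): given a multiplication whose result only feeds
   additions or subtractions in the same basic block,

     tmp_1 = a_2 * b_3;
     x_4 = tmp_1 + c_5;     becomes  x_4 = FMA_EXPR <a_2, b_3, c_5>
     y_6 = tmp_1 - d_7;     becomes  y_6 = FMA_EXPR <a_2, b_3, -d_7>

   each use is rewritten to an FMA_EXPR and the multiplication is left
   dead for the caller to remove; a NEGATE_EXPR of tmp_1 instead negates
   the first multiplicand, giving the fnma form.  */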
3622
3623
3624 /* Helper function of match_uaddsub_overflow. Return 1
3625 if USE_STMT is an unsigned overflow check of the form ovf != 0 for
3626 STMT, -1 if USE_STMT is an unsigned overflow check of the form
3627 ovf == 0, and 0 otherwise. */
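/* For example (illustrative SSA names): for
     r_3 = x_1 + y_2;
   a use such as "if (r_3 < x_1)" or "if (x_1 > r_3)" is the overflow
   check and yields 1, "if (r_3 >= x_1)" yields -1, and any unrelated
   use yields 0.  */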
3628
3629 static int
3630 uaddsub_overflow_check_p (gimple *stmt, gimple *use_stmt)
3631 {
3632 enum tree_code ccode = ERROR_MARK;
3633 tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
3634 if (gimple_code (use_stmt) == GIMPLE_COND)
3635 {
3636 ccode = gimple_cond_code (use_stmt);
3637 crhs1 = gimple_cond_lhs (use_stmt);
3638 crhs2 = gimple_cond_rhs (use_stmt);
3639 }
3640 else if (is_gimple_assign (use_stmt))
3641 {
3642 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3643 {
3644 ccode = gimple_assign_rhs_code (use_stmt);
3645 crhs1 = gimple_assign_rhs1 (use_stmt);
3646 crhs2 = gimple_assign_rhs2 (use_stmt);
3647 }
3648 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
3649 {
3650 tree cond = gimple_assign_rhs1 (use_stmt);
3651 if (COMPARISON_CLASS_P (cond))
3652 {
3653 ccode = TREE_CODE (cond);
3654 crhs1 = TREE_OPERAND (cond, 0);
3655 crhs2 = TREE_OPERAND (cond, 1);
3656 }
3657 else
3658 return 0;
3659 }
3660 else
3661 return 0;
3662 }
3663 else
3664 return 0;
3665
3666 if (TREE_CODE_CLASS (ccode) != tcc_comparison)
3667 return 0;
3668
3669 enum tree_code code = gimple_assign_rhs_code (stmt);
3670 tree lhs = gimple_assign_lhs (stmt);
3671 tree rhs1 = gimple_assign_rhs1 (stmt);
3672 tree rhs2 = gimple_assign_rhs2 (stmt);
3673
3674 switch (ccode)
3675 {
3676 case GT_EXPR:
3677 case LE_EXPR:
3678 /* r = a - b; r > a or r <= a
3679 r = a + b; a > r or a <= r or b > r or b <= r. */
3680 if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
3681 || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
3682 && crhs2 == lhs))
3683 return ccode == GT_EXPR ? 1 : -1;
3684 break;
3685 case LT_EXPR:
3686 case GE_EXPR:
3687 /* r = a - b; a < r or a >= r
3688 r = a + b; r < a or r >= a or r < b or r >= b. */
3689 if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
3690 || (code == PLUS_EXPR && crhs1 == lhs
3691 && (crhs2 == rhs1 || crhs2 == rhs2)))
3692 return ccode == LT_EXPR ? 1 : -1;
3693 break;
3694 default:
3695 break;
3696 }
3697 return 0;
3698 }
3699
3700 /* Recognize for unsigned x
3701 x = y - z;
3702 if (x > y)
3703 where there are other uses of x and replace it with
3704 _7 = SUB_OVERFLOW (y, z);
3705 x = REALPART_EXPR <_7>;
3706 _8 = IMAGPART_EXPR <_7>;
3707 if (_8)
3708 and similarly for addition. */
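/* A source-level sketch of the kind of code this catches (illustrative
   only; the function and its name are hypothetical):

     unsigned int usub (unsigned int y, unsigned int z, unsigned int *res)
     {
       unsigned int x = y - z;
       *res = x;                  (another use of x besides the check)
       return x > y;              (the unsigned overflow check)
     }

   On targets providing the usubv<mode>4 (resp. uaddv<mode>4) pattern the
   subtraction and the comparison collapse into one IFN_SUB_OVERFLOW call
   whose REALPART_EXPR is the difference and whose IMAGPART_EXPR feeds the
   check.  */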
3709
3710 static bool
3711 match_uaddsub_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
3712 enum tree_code code)
3713 {
3714 tree lhs = gimple_assign_lhs (stmt);
3715 tree type = TREE_TYPE (lhs);
3716 use_operand_p use_p;
3717 imm_use_iterator iter;
3718 bool use_seen = false;
3719 bool ovf_use_seen = false;
3720 gimple *use_stmt;
3721
3722 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
3723 if (!INTEGRAL_TYPE_P (type)
3724 || !TYPE_UNSIGNED (type)
3725 || has_zero_uses (lhs)
3726 || has_single_use (lhs)
3727 || optab_handler (code == PLUS_EXPR ? uaddv4_optab : usubv4_optab,
3728 TYPE_MODE (type)) == CODE_FOR_nothing)
3729 return false;
3730
3731 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3732 {
3733 use_stmt = USE_STMT (use_p);
3734 if (is_gimple_debug (use_stmt))
3735 continue;
3736
3737 if (uaddsub_overflow_check_p (stmt, use_stmt))
3738 ovf_use_seen = true;
3739 else
3740 use_seen = true;
3741 if (ovf_use_seen && use_seen)
3742 break;
3743 }
3744
3745 if (!ovf_use_seen || !use_seen)
3746 return false;
3747
3748 tree ctype = build_complex_type (type);
3749 tree rhs1 = gimple_assign_rhs1 (stmt);
3750 tree rhs2 = gimple_assign_rhs2 (stmt);
3751 gcall *g = gimple_build_call_internal (code == PLUS_EXPR
3752 ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW,
3753 2, rhs1, rhs2);
3754 tree ctmp = make_ssa_name (ctype);
3755 gimple_call_set_lhs (g, ctmp);
3756 gsi_insert_before (gsi, g, GSI_SAME_STMT);
3757 gassign *g2 = gimple_build_assign (lhs, REALPART_EXPR,
3758 build1 (REALPART_EXPR, type, ctmp));
3759 gsi_replace (gsi, g2, true);
3760 tree ovf = make_ssa_name (type);
3761 g2 = gimple_build_assign (ovf, IMAGPART_EXPR,
3762 build1 (IMAGPART_EXPR, type, ctmp));
3763 gsi_insert_after (gsi, g2, GSI_NEW_STMT);
3764
3765 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3766 {
3767 if (is_gimple_debug (use_stmt))
3768 continue;
3769
3770 int ovf_use = uaddsub_overflow_check_p (stmt, use_stmt);
3771 if (ovf_use == 0)
3772 continue;
3773 if (gimple_code (use_stmt) == GIMPLE_COND)
3774 {
3775 gcond *cond_stmt = as_a <gcond *> (use_stmt);
3776 gimple_cond_set_lhs (cond_stmt, ovf);
3777 gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0));
3778 gimple_cond_set_code (cond_stmt, ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3779 }
3780 else
3781 {
3782 gcc_checking_assert (is_gimple_assign (use_stmt));
3783 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3784 {
3785 gimple_assign_set_rhs1 (use_stmt, ovf);
3786 gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0));
3787 gimple_assign_set_rhs_code (use_stmt,
3788 ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3789 }
3790 else
3791 {
3792 gcc_checking_assert (gimple_assign_rhs_code (use_stmt)
3793 == COND_EXPR);
3794 tree cond = build2 (ovf_use == 1 ? NE_EXPR : EQ_EXPR,
3795 boolean_type_node, ovf,
3796 build_int_cst (type, 0));
3797 gimple_assign_set_rhs1 (use_stmt, cond);
3798 }
3799 }
3800 update_stmt (use_stmt);
3801 }
3802 return true;
3803 }
3804
3805 /* Return true if the target has support for divmod. */
3806
3807 static bool
3808 target_supports_divmod_p (optab divmod_optab, optab div_optab, machine_mode mode)
3809 {
3810 /* If target supports hardware divmod insn, use it for divmod. */
3811 if (optab_handler (divmod_optab, mode) != CODE_FOR_nothing)
3812 return true;
3813
3814 /* Check if libfunc for divmod is available. */
3815 rtx libfunc = optab_libfunc (divmod_optab, mode);
3816 if (libfunc != NULL_RTX)
3817 {
3818 /* If optab_handler exists for div_optab, perhaps in a wider mode,
3819 we don't want to use the libfunc even if it exists for the given mode. */
3820 for (machine_mode div_mode = mode;
3821 div_mode != VOIDmode;
3822 div_mode = GET_MODE_WIDER_MODE (div_mode))
3823 if (optab_handler (div_optab, div_mode) != CODE_FOR_nothing)
3824 return false;
3825
3826 return targetm.expand_divmod_libfunc != NULL;
3827 }
3828
3829 return false;
3830 }
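
/* Example of the wider-mode rule above (illustrative): if SImode has only
   a divmod libfunc but the target provides an SImode or DImode division
   insn, the loop finds that handler, the libfunc is not used, and the
   transform is rejected for SImode.  */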
3831
3832 /* Check if stmt is a candidate for the divmod transform. */
3833
3834 static bool
3835 divmod_candidate_p (gassign *stmt)
3836 {
3837 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
3838 enum machine_mode mode = TYPE_MODE (type);
3839 optab divmod_optab, div_optab;
3840
3841 if (TYPE_UNSIGNED (type))
3842 {
3843 divmod_optab = udivmod_optab;
3844 div_optab = udiv_optab;
3845 }
3846 else
3847 {
3848 divmod_optab = sdivmod_optab;
3849 div_optab = sdiv_optab;
3850 }
3851
3852 tree op1 = gimple_assign_rhs1 (stmt);
3853 tree op2 = gimple_assign_rhs2 (stmt);
3854
3855 /* Disable the transform if either operand is a constant, since
3856 division-by-constant may have specialized expansion. */
3857 if (CONSTANT_CLASS_P (op1) || CONSTANT_CLASS_P (op2))
3858 return false;
3859
3860 /* Exclude the case where TYPE_OVERFLOW_TRAPS (type) as that should
3861 expand using the [su]divv optabs. */
3862 if (TYPE_OVERFLOW_TRAPS (type))
3863 return false;
3864
3865 if (!target_supports_divmod_p (divmod_optab, div_optab, mode))
3866 return false;
3867
3868 return true;
3869 }
3870
3871 /* This function looks for:
3872 t1 = a TRUNC_DIV_EXPR b;
3873 t2 = a TRUNC_MOD_EXPR b;
3874 and transforms it to the following sequence:
3875 complex_tmp = DIVMOD (a, b);
3876 t1 = REALPART_EXPR (complex_tmp);
3877 t2 = IMAGPART_EXPR (complex_tmp);
3878 For conditions enabling the transform see divmod_candidate_p().
3879
3880 The pass has three parts:
3881 1) Find top_stmt which is a trunc_div or trunc_mod stmt and dominates all
3882 other trunc_div_expr and trunc_mod_expr stmts.
3883 2) Add top_stmt and all trunc_div and trunc_mod stmts dominated by top_stmt
3884 to stmts vector.
3885 3) Insert DIVMOD call just before top_stmt and update entries in
3886 stmts vector to use the return value of DIVMOD (REALPART_EXPR for div,
3887 IMAGPART_EXPR for mod). */
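/* A source-level sketch (illustrative; names are hypothetical):

     void qr (int a, int b, int *q, int *r)
     {
       *q = a / b;                (TRUNC_DIV_EXPR)
       *r = a % b;                (TRUNC_MOD_EXPR with the same operands)
     }

   When divmod_candidate_p holds, both statements are rewritten to read
   from a single "divmod_tmp = DIVMOD (a, b)" call: the quotient from
   REALPART_EXPR <divmod_tmp> and the remainder from IMAGPART_EXPR
   <divmod_tmp>.  */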
3888
3889 static bool
3890 convert_to_divmod (gassign *stmt)
3891 {
3892 if (stmt_can_throw_internal (stmt)
3893 || !divmod_candidate_p (stmt))
3894 return false;
3895
3896 tree op1 = gimple_assign_rhs1 (stmt);
3897 tree op2 = gimple_assign_rhs2 (stmt);
3898
3899 imm_use_iterator use_iter;
3900 gimple *use_stmt;
3901 auto_vec<gimple *> stmts;
3902
3903 gimple *top_stmt = stmt;
3904 basic_block top_bb = gimple_bb (stmt);
3905
3906 /* Part 1: Try to set top_stmt to "topmost" stmt that dominates
3907 at least stmt and possibly other trunc_div/trunc_mod stmts
3908 having the same operands as stmt. */
3909
3910 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, op1)
3911 {
3912 if (is_gimple_assign (use_stmt)
3913 && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
3914 || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
3915 && operand_equal_p (op1, gimple_assign_rhs1 (use_stmt), 0)
3916 && operand_equal_p (op2, gimple_assign_rhs2 (use_stmt), 0))
3917 {
3918 if (stmt_can_throw_internal (use_stmt))
3919 continue;
3920
3921 basic_block bb = gimple_bb (use_stmt);
3922
3923 if (bb == top_bb)
3924 {
3925 if (gimple_uid (use_stmt) < gimple_uid (top_stmt))
3926 top_stmt = use_stmt;
3927 }
3928 else if (dominated_by_p (CDI_DOMINATORS, top_bb, bb))
3929 {
3930 top_bb = bb;
3931 top_stmt = use_stmt;
3932 }
3933 }
3934 }
3935
3936 tree top_op1 = gimple_assign_rhs1 (top_stmt);
3937 tree top_op2 = gimple_assign_rhs2 (top_stmt);
3938
3939 stmts.safe_push (top_stmt);
3940 bool div_seen = (gimple_assign_rhs_code (top_stmt) == TRUNC_DIV_EXPR);
3941
3942 /* Part 2: Add all trunc_div/trunc_mod statements dominated by top_bb
3943 to stmts vector. The 2nd loop will always add stmt to stmts vector, since
3944 gimple_bb (top_stmt) dominates gimple_bb (stmt), so the
3945 2nd loop ends up adding at least a single trunc_mod_expr stmt. */
3946
3947 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, top_op1)
3948 {
3949 if (is_gimple_assign (use_stmt)
3950 && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
3951 || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
3952 && operand_equal_p (top_op1, gimple_assign_rhs1 (use_stmt), 0)
3953 && operand_equal_p (top_op2, gimple_assign_rhs2 (use_stmt), 0))
3954 {
3955 if (use_stmt == top_stmt
3956 || stmt_can_throw_internal (use_stmt)
3957 || !dominated_by_p (CDI_DOMINATORS, gimple_bb (use_stmt), top_bb))
3958 continue;
3959
3960 stmts.safe_push (use_stmt);
3961 if (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR)
3962 div_seen = true;
3963 }
3964 }
3965
3966 if (!div_seen)
3967 return false;
3968
3969 /* Part 3: Create a call to the internal fn DIVMOD:
3970 divmod_tmp = DIVMOD (op1, op2). */
3971
3972 gcall *call_stmt = gimple_build_call_internal (IFN_DIVMOD, 2, op1, op2);
3973 tree res = make_temp_ssa_name (build_complex_type (TREE_TYPE (op1)),
3974 call_stmt, "divmod_tmp");
3975 gimple_call_set_lhs (call_stmt, res);
3976
3977 /* Insert the call before top_stmt. */
3978 gimple_stmt_iterator top_stmt_gsi = gsi_for_stmt (top_stmt);
3979 gsi_insert_before (&top_stmt_gsi, call_stmt, GSI_SAME_STMT);
3980
3981 widen_mul_stats.divmod_calls_inserted++;
3982
3983 /* Update all statements in stmts vector:
3984 lhs = op1 TRUNC_DIV_EXPR op2 -> lhs = REALPART_EXPR<divmod_tmp>
3985 lhs = op1 TRUNC_MOD_EXPR op2 -> lhs = IMAGPART_EXPR<divmod_tmp>. */
3986
3987 for (unsigned i = 0; stmts.iterate (i, &use_stmt); ++i)
3988 {
3989 tree new_rhs;
3990
3991 switch (gimple_assign_rhs_code (use_stmt))
3992 {
3993 case TRUNC_DIV_EXPR:
3994 new_rhs = fold_build1 (REALPART_EXPR, TREE_TYPE (op1), res);
3995 break;
3996
3997 case TRUNC_MOD_EXPR:
3998 new_rhs = fold_build1 (IMAGPART_EXPR, TREE_TYPE (op1), res);
3999 break;
4000
4001 default:
4002 gcc_unreachable ();
4003 }
4004
4005 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
4006 gimple_assign_set_rhs_from_tree (&gsi, new_rhs);
4007 update_stmt (use_stmt);
4008 }
4009
4010 return true;
4011 }
4012
4013 /* Find integer multiplications where the operands are extended from
4014 smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
4015 where appropriate. */
4016
4017 namespace {
4018
4019 const pass_data pass_data_optimize_widening_mul =
4020 {
4021 GIMPLE_PASS, /* type */
4022 "widening_mul", /* name */
4023 OPTGROUP_NONE, /* optinfo_flags */
4024 TV_NONE, /* tv_id */
4025 PROP_ssa, /* properties_required */
4026 0, /* properties_provided */
4027 0, /* properties_destroyed */
4028 0, /* todo_flags_start */
4029 TODO_update_ssa, /* todo_flags_finish */
4030 };
4031
4032 class pass_optimize_widening_mul : public gimple_opt_pass
4033 {
4034 public:
4035 pass_optimize_widening_mul (gcc::context *ctxt)
4036 : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
4037 {}
4038
4039 /* opt_pass methods: */
4040 virtual bool gate (function *)
4041 {
4042 return flag_expensive_optimizations && optimize;
4043 }
4044
4045 virtual unsigned int execute (function *);
4046
4047 }; // class pass_optimize_widening_mul
4048
4049 unsigned int
4050 pass_optimize_widening_mul::execute (function *fun)
4051 {
4052 basic_block bb;
4053 bool cfg_changed = false;
4054
4055 memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
4056 calculate_dominance_info (CDI_DOMINATORS);
4057 renumber_gimple_stmt_uids ();
4058
4059 FOR_EACH_BB_FN (bb, fun)
4060 {
4061 gimple_stmt_iterator gsi;
4062
4063 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
4064 {
4065 gimple *stmt = gsi_stmt (gsi);
4066 enum tree_code code;
4067
4068 if (is_gimple_assign (stmt))
4069 {
4070 code = gimple_assign_rhs_code (stmt);
4071 switch (code)
4072 {
4073 case MULT_EXPR:
4074 if (!convert_mult_to_widen (stmt, &gsi)
4075 && convert_mult_to_fma (stmt,
4076 gimple_assign_rhs1 (stmt),
4077 gimple_assign_rhs2 (stmt)))
4078 {
4079 gsi_remove (&gsi, true);
4080 release_defs (stmt);
4081 continue;
4082 }
4083 break;
4084
4085 case PLUS_EXPR:
4086 case MINUS_EXPR:
4087 if (!convert_plusminus_to_widen (&gsi, stmt, code))
4088 match_uaddsub_overflow (&gsi, stmt, code);
4089 break;
4090
4091 case TRUNC_MOD_EXPR:
4092 convert_to_divmod (as_a<gassign *> (stmt));
4093 break;
4094
4095 default:;
4096 }
4097 }
4098 else if (is_gimple_call (stmt)
4099 && gimple_call_lhs (stmt))
4100 {
4101 tree fndecl = gimple_call_fndecl (stmt);
4102 if (fndecl
4103 && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
4104 {
4105 switch (DECL_FUNCTION_CODE (fndecl))
4106 {
4107 case BUILT_IN_POWF:
4108 case BUILT_IN_POW:
4109 case BUILT_IN_POWL:
4110 if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
4111 && real_equal
4112 (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
4113 &dconst2)
4114 && convert_mult_to_fma (stmt,
4115 gimple_call_arg (stmt, 0),
4116 gimple_call_arg (stmt, 0)))
4117 {
4118 unlink_stmt_vdef (stmt);
4119 if (gsi_remove (&gsi, true)
4120 && gimple_purge_dead_eh_edges (bb))
4121 cfg_changed = true;
4122 release_defs (stmt);
4123 continue;
4124 }
4125 break;
4126
4127 default:;
4128 }
4129 }
4130 }
4131 gsi_next (&gsi);
4132 }
4133 }
4134
4135 statistics_counter_event (fun, "widening multiplications inserted",
4136 widen_mul_stats.widen_mults_inserted);
4137 statistics_counter_event (fun, "widening maccs inserted",
4138 widen_mul_stats.maccs_inserted);
4139 statistics_counter_event (fun, "fused multiply-adds inserted",
4140 widen_mul_stats.fmas_inserted);
4141 statistics_counter_event (fun, "divmod calls inserted",
4142 widen_mul_stats.divmod_calls_inserted);
4143
4144 return cfg_changed ? TODO_cleanup_cfg : 0;
4145 }
4146
4147 } // anon namespace
4148
4149 gimple_opt_pass *
4150 make_pass_optimize_widening_mul (gcc::context *ctxt)
4151 {
4152 return new pass_optimize_widening_mul (ctxt);
4153 }