/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, with some experiments it was found out that the
   transformation is not always useful if there are only two divisions
   by the same divisor.  This is probably because modern processors
   can pipeline the divisions; on older, in-order processors it should
   still be effective to optimize two divisions by the same number.
   We make this a param, and it shall be called N in the remainder of
   this comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has less than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
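
/* A short worked example of the placement rule (block and SSA names
   are illustrative, not from an actual dump).  With N == 2 and a
   divisor d, given

	A:  x = a / d;
	B:  y = b / d;

   where A dominates B and B post-dominates A, both divisions are
   registered with weight 2, the post-order walk accumulates a count
   of 4 at A, and 4 / 2 >= N, so a single "reciptmp = 1.0 / d" is
   inserted in A and both divisions become multiplications by it.  */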

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "builtins.h"
#include "params.h"
#include "internal-fn.h"
#include "case-cfn-macros.h"
#include "optabs-libfuncs.h"
#include "tree-eh.h"
#include "targhooks.h"
#include "domwalk.h"

/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence {
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the SSA_NAME holding the definition for a squared
     reciprocal inserted in BB.  */
  tree square_recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple *recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence" in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};
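
/* For instance, if the divisions occur in blocks B3 and B5 whose
   nearest common dominator is B1, the tree rooted at occ_head holds an
   occurrence for B1 (bb_has_division == false) whose children list
   contains the occurrences for B3 and B5 (bb_has_division == true);
   see occ_new and insert_bb below.  Block numbers are illustrative.  */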

static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;

  /* Number of divmod calls inserted.  */
  int divmod_calls_inserted;
} widen_mul_stats;

/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static object_allocator<occurrence> *occ_pool;


/* Allocate and return a new struct occurrence for basic block BB, and
   whose children list is headed by CHILDREN.  */
static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = occ_pool->allocate ();
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}


/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
           struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
      if (dom == bb)
        {
          /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
             from its list.  */
          *p_occ = occ->next;
          occ->next = new_occ->children;
          new_occ->children = occ;

          /* Try the next block (it may as well be dominated by BB).  */
        }

      else if (dom == occ_bb)
        {
          /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
          insert_bb (new_occ, dom, &occ->children);
          return;
        }

      else if (dom != idom)
        {
          gcc_assert (!dom->aux);

          /* There is a dominator between IDOM and BB, add it and make
             two children out of NEW_OCC and OCC.  First, remove OCC from
             its list.  */
          *p_occ = occ->next;
          new_occ->next = occ;
          occ->next = NULL;

          /* None of the previous blocks has DOM as a dominator: if we tail
             recursed, we would reexamine them uselessly.  Just switch BB with
             DOM, and go on looking for blocks dominated by DOM.  */
          new_occ = occ_new (dom, new_occ);
        }

      else
        {
          /* Nothing special, go on with the next element.  */
          p_occ = &occ->next;
        }
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}

/* Register that we found a division in BB.
   IMPORTANCE is a measure of how much weighting to give
   that division.  Use IMPORTANCE = 2 to register a single
   division.  If the division is going to be found multiple
   times use 1 (as it is with squares).  */

static inline void
register_division_in (basic_block bb, int importance)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions += importance;
}
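
/* For example, a lone division "t = a / x" calls
   register_division_in (bb, 2) once and contributes 2, while a
   division by a square, "t = a / (x * x)", is discovered twice when
   walking the immediate uses of x and calls register_division_in
   (bb, 1) each time, so both patterns carry the same total weight.
   The costing in insert_reciprocals divides the count by 2 again.  */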


/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
        compute_merit (occ_child);

      if (flag_exceptions)
        bb = single_noncomplex_succ (dom);
      else
        bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
        occ->num_divisions += occ_child->num_divisions;
    }
}


/* Return whether USE_STMT is a floating-point division by DEF.  */
static inline bool
is_division_by (gimple *use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
         && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
         && gimple_assign_rhs2 (use_stmt) == def
         /* Do not recognize x / x as valid division, as we are getting
            confused later by replacing all immediate uses x in such
            a stmt.  */
         && gimple_assign_rhs1 (use_stmt) != def;
}

/* Return whether USE_STMT is DEF * DEF.  */
static inline bool
is_square_of (gimple *use_stmt, tree def)
{
  if (gimple_code (use_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (use_stmt) == MULT_EXPR)
    {
      tree op0 = gimple_assign_rhs1 (use_stmt);
      tree op1 = gimple_assign_rhs2 (use_stmt);

      return op0 == op1 && op0 == def;
    }
  return false;
}

/* Return whether USE_STMT is a floating-point division by
   DEF * DEF.  */
static inline bool
is_division_by_square (gimple *use_stmt, tree def)
{
  if (gimple_code (use_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
      && gimple_assign_rhs1 (use_stmt) != gimple_assign_rhs2 (use_stmt))
    {
      tree denominator = gimple_assign_rhs2 (use_stmt);
      if (TREE_CODE (denominator) == SSA_NAME)
        return is_square_of (SSA_NAME_DEF_STMT (denominator), def);
    }
  return false;
}
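
/* For example, given the GIMPLE sequence (SSA names illustrative)

	t_1 = x_2 * x_2;
	y_3 = a_4 / t_1;

   is_square_of returns true for the first statement with DEF == x_2,
   and is_division_by_square returns true for the second, because the
   denominator's defining statement is exactly DEF * DEF.  */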

/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_GSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.

   If SHOULD_INSERT_SQUARE_RECIP is set, then this also inserts
   the square of the reciprocal immediately after the definition
   of the reciprocal.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
                    tree def, tree recip_def, tree square_recip_def,
                    int should_insert_square_recip, int threshold)
{
  tree type;
  gassign *new_stmt, *new_square_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      /* Divide by two as all divisions are counted twice in
         the costing loop.  */
      && occ->num_divisions / 2 >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
                                      build_one_cst (type), def);

      if (should_insert_square_recip)
        {
          square_recip_def = create_tmp_reg (type, "powmult_reciptmp");
          new_square_stmt = gimple_build_assign (square_recip_def, MULT_EXPR,
                                                 recip_def, recip_def);
        }

      if (occ->bb_has_division)
        {
          /* Case 1: insert before an existing division.  */
          gsi = gsi_after_labels (occ->bb);
          while (!gsi_end_p (gsi)
                 && (!is_division_by (gsi_stmt (gsi), def))
                 && (!is_division_by_square (gsi_stmt (gsi), def)))
            gsi_next (&gsi);

          gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
          if (should_insert_square_recip)
            gsi_insert_before (&gsi, new_square_stmt, GSI_SAME_STMT);
        }
      else if (def_gsi && occ->bb == def_gsi->bb)
        {
          /* Case 2: insert right after the definition.  Note that this will
             never happen if the definition statement can throw, because in
             that case the sole successor of the statement's basic block will
             dominate all the uses as well.  */
          gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
          if (should_insert_square_recip)
            gsi_insert_after (def_gsi, new_square_stmt, GSI_NEW_STMT);
        }
      else
        {
          /* Case 3: insert in a basic block not containing defs/uses.  */
          gsi = gsi_after_labels (occ->bb);
          gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
          if (should_insert_square_recip)
            gsi_insert_before (&gsi, new_square_stmt, GSI_SAME_STMT);
        }

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  occ->square_recip_def = square_recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def,
                        square_recip_def, should_insert_square_recip,
                        threshold);
}
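
/* For illustration, with SHOULD_INSERT_SQUARE_RECIP set the walk above
   turns (names illustrative)

	t1 = a / x;
	t2 = b / (x * x);

   into

	reciptmp = 1.0 / x;
	powmult_reciptmp = reciptmp * reciptmp;
	t1 = a * reciptmp;
	t2 = b * powmult_reciptmp;

   where the final multiplications are produced by replace_reciprocal
   and replace_reciprocal_squares below.  */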

/* Replace occurrences of expr / (x * x) with expr * ((1 / x) * (1 / x)).
   Take as argument the use for (x * x).  */
static inline void
replace_reciprocal_squares (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb) && occ->square_recip_def
      && occ->recip_def)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      gimple_assign_set_rhs2 (use_stmt, occ->square_recip_def);
      SET_USE (use_p, occ->square_recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}


/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}


/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  occ_pool->remove (occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
        next = free_bb (next);

      return child;
    }
}


/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p, square_use_p;
  imm_use_iterator use_iter, square_use_iter;
  tree square_def;
  struct occurrence *occ;
  int count = 0;
  int threshold;
  int square_recip_count = 0;
  int sqrt_recip_count = 0;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && TREE_CODE (def) == SSA_NAME);
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));

  /* If DEF is a square (x * x), count the number of divisions by x.
     If there are more divisions by x than by (DEF * DEF), prefer to optimize
     the reciprocal of x instead of DEF.  This improves cases like:
       def = x * x
       t0 = a / def
       t1 = b / def
       t2 = c / x
     Reciprocal optimization of x results in 1 division rather than 2 or 3.  */
  gimple *def_stmt = SSA_NAME_DEF_STMT (def);

  if (is_gimple_assign (def_stmt)
      && gimple_assign_rhs_code (def_stmt) == MULT_EXPR
      && TREE_CODE (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
      && gimple_assign_rhs1 (def_stmt) == gimple_assign_rhs2 (def_stmt))
    {
      tree op0 = gimple_assign_rhs1 (def_stmt);

      FOR_EACH_IMM_USE_FAST (use_p, use_iter, op0)
        {
          gimple *use_stmt = USE_STMT (use_p);
          if (is_division_by (use_stmt, op0))
            sqrt_recip_count++;
        }
    }

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
        {
          register_division_in (gimple_bb (use_stmt), 2);
          count++;
        }

      if (is_square_of (use_stmt, def))
        {
          square_def = gimple_assign_lhs (use_stmt);
          FOR_EACH_IMM_USE_FAST (square_use_p, square_use_iter, square_def)
            {
              gimple *square_use_stmt = USE_STMT (square_use_p);
              if (is_division_by (square_use_stmt, square_def))
                {
                  /* This is executed twice for each division by a square.  */
                  register_division_in (gimple_bb (square_use_stmt), 1);
                  square_recip_count++;
                }
            }
        }
    }

  /* Square reciprocals were counted twice above.  */
  square_recip_count /= 2;

  /* If it is more profitable to optimize 1 / x, don't optimize 1 / (x * x).  */
  if (sqrt_recip_count > square_recip_count)
    return;

  /* Do the expensive part only if we can hope to optimize something.  */
  if (count + square_recip_count >= threshold && count >= 1)
    {
      gimple *use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
        {
          compute_merit (occ);
          insert_reciprocals (def_gsi, occ, def, NULL, NULL,
                              square_recip_count, threshold);
        }

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
        {
          if (is_division_by (use_stmt, def))
            {
              FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
                replace_reciprocal (use_p);
            }
          else if (square_recip_count > 0 && is_square_of (use_stmt, def))
            {
              FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
                {
                  /* Find all uses of the square that are divisions and
                     replace them by multiplications with the inverse.  */
                  imm_use_iterator square_iterator;
                  gimple *powmult_use_stmt = USE_STMT (use_p);
                  tree powmult_def_name = gimple_assign_lhs (powmult_use_stmt);

                  FOR_EACH_IMM_USE_STMT (powmult_use_stmt,
                                         square_iterator, powmult_def_name)
                    FOR_EACH_IMM_USE_ON_STMT (square_use_p, square_iterator)
                      {
                        gimple *powmult_use_stmt = USE_STMT (square_use_p);
                        if (is_division_by (powmult_use_stmt, powmult_def_name))
                          replace_reciprocal_squares (square_use_p);
                      }
                }
            }
        }
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}

/* Return an internal function that implements the reciprocal of CALL,
   or IFN_LAST if there is no such function that the target supports.  */

internal_fn
internal_fn_reciprocal (gcall *call)
{
  internal_fn ifn;

  switch (gimple_call_combined_fn (call))
    {
    CASE_CFN_SQRT:
    CASE_CFN_SQRT_FN:
      ifn = IFN_RSQRT;
      break;

    default:
      return IFN_LAST;
    }

  tree_pair types = direct_internal_fn_types (ifn, call);
  if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
    return IFN_LAST;

  return ifn;
}

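/* For example, on a target that provides IFN_RSQRT for the mode of x
   (a target-specific assumption), this lets the caller below rewrite

	t_1 = sqrt (x_2);
	y_3 = a_4 / t_1;

   into "t_1 = .RSQRT (x_2); y_3 = a_4 * t_1;", removing the division.
   SSA names are illustrative.  */
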
/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */
namespace {

const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_RECIP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
  virtual unsigned int execute (function *);

}; // class pass_cse_reciprocals

unsigned int
pass_cse_reciprocals::execute (function *fun)
{
  basic_block bb;
  tree arg;

  occ_pool = new object_allocator<occurrence> ("dominators for recip");

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

  if (flag_checking)
    FOR_EACH_BB_FN (bb, fun)
      gcc_assert (!bb->aux);

  for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
        && is_gimple_reg (arg))
      {
        tree name = ssa_default_def (fun, arg);
        if (name)
          execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB_FN (bb, fun)
    {
      tree def;

      for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gphi *phi = gsi.phi ();
          def = PHI_RESULT (phi);
          if (! virtual_operand_p (def)
              && FLOAT_TYPE_P (TREE_TYPE (def)))
            execute_cse_reciprocals_1 (NULL, def);
        }

      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);

          if (gimple_has_lhs (stmt)
              && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
              && FLOAT_TYPE_P (TREE_TYPE (def))
              && TREE_CODE (def) == SSA_NAME)
            execute_cse_reciprocals_1 (&gsi, def);
        }

      if (optimize_bb_for_size_p (bb))
        continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);

          if (is_gimple_assign (stmt)
              && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
            {
              tree arg1 = gimple_assign_rhs2 (stmt);
              gimple *stmt1;

              if (TREE_CODE (arg1) != SSA_NAME)
                continue;

              stmt1 = SSA_NAME_DEF_STMT (arg1);

              if (is_gimple_call (stmt1)
                  && gimple_call_lhs (stmt1))
                {
                  bool fail;
                  imm_use_iterator ui;
                  use_operand_p use_p;
                  tree fndecl = NULL_TREE;

                  gcall *call = as_a <gcall *> (stmt1);
                  internal_fn ifn = internal_fn_reciprocal (call);
                  if (ifn == IFN_LAST)
                    {
                      fndecl = gimple_call_fndecl (call);
                      if (!fndecl
                          || !fndecl_built_in_p (fndecl, BUILT_IN_MD))
                        continue;
                      fndecl = targetm.builtin_reciprocal (fndecl);
                      if (!fndecl)
                        continue;
                    }

                  /* Check that all uses of the SSA name are divisions,
                     otherwise replacing the defining statement will do
                     the wrong thing.  */
                  fail = false;
                  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
                    {
                      gimple *stmt2 = USE_STMT (use_p);
                      if (is_gimple_debug (stmt2))
                        continue;
                      if (!is_gimple_assign (stmt2)
                          || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
                          || gimple_assign_rhs1 (stmt2) == arg1
                          || gimple_assign_rhs2 (stmt2) != arg1)
                        {
                          fail = true;
                          break;
                        }
                    }
                  if (fail)
                    continue;

                  gimple_replace_ssa_lhs (call, arg1);
                  if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
                    {
                      auto_vec<tree, 4> args;
                      for (unsigned int i = 0;
                           i < gimple_call_num_args (call); i++)
                        args.safe_push (gimple_call_arg (call, i));
                      gcall *stmt2;
                      if (ifn == IFN_LAST)
                        stmt2 = gimple_build_call_vec (fndecl, args);
                      else
                        stmt2 = gimple_build_call_internal_vec (ifn, args);
                      gimple_call_set_lhs (stmt2, arg1);
                      if (gimple_vdef (call))
                        {
                          gimple_set_vdef (stmt2, gimple_vdef (call));
                          SSA_NAME_DEF_STMT (gimple_vdef (stmt2)) = stmt2;
                        }
                      gimple_call_set_nothrow (stmt2,
                                               gimple_call_nothrow_p (call));
                      gimple_set_vuse (stmt2, gimple_vuse (call));
                      gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
                      gsi_replace (&gsi2, stmt2, true);
                    }
                  else
                    {
                      if (ifn == IFN_LAST)
                        gimple_call_set_fndecl (call, fndecl);
                      else
                        gimple_call_set_internal_fn (call, ifn);
                      update_stmt (call);
                    }
                  reciprocal_stats.rfuncs_inserted++;

                  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
                    {
                      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
                      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
                      fold_stmt_inplace (&gsi);
                      update_stmt (stmt);
                    }
                }
            }
        }
    }

  statistics_counter_event (fun, "reciprocal divs inserted",
                            reciprocal_stats.rdivs_inserted);
  statistics_counter_event (fun, "reciprocal functions inserted",
                            reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  delete occ_pool;
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}

/* Records an occurrence at statement USE_STMT in the vector of statements
   STMTS if it is dominated by *TOP_BB or dominates it or this basic block
   is not yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple *> *stmts,
                     basic_block *top_bb, gimple *use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
          || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
           || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}

/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */

static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type;
  gimple *def_stmt, *use_stmt, *stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  auto_vec<gimple *> stmts;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  type = TREE_TYPE (name);
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
          || !gimple_call_lhs (use_stmt))
        continue;

      switch (gimple_call_combined_fn (use_stmt))
        {
        CASE_CFN_COS:
          seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
          break;

        CASE_CFN_SIN:
          seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
          break;

        CASE_CFN_CEXPI:
          seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
          break;

        default:;
        }
    }

  if (seen_cos + seen_sin + seen_cexpi <= 1)
    return false;

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree rhs = NULL;

      switch (gimple_call_combined_fn (use_stmt))
        {
        CASE_CFN_COS:
          rhs = fold_build1 (REALPART_EXPR, type, res);
          break;

        CASE_CFN_SIN:
          rhs = fold_build1 (IMAGPART_EXPR, type, res);
          break;

        CASE_CFN_CEXPI:
          rhs = res;
          break;

        default:
          gcc_unreachable ();
        }

      /* Replace call with a copy.  */
      stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

      gsi = gsi_for_stmt (use_stmt);
      gsi_replace (&gsi, stmt, true);
      if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
        cfg_changed = true;
    }

  return cfg_changed;
}
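
/* For example, for a float argument x this rewrites

	s_1 = sinf (x_2);
	c_3 = cosf (x_2);

   into

	sincostmp_4 = __builtin_cexpif (x_2);
	s_1 = IMAGPART_EXPR <sincostmp_4>;
	c_3 = REALPART_EXPR <sincostmp_4>;

   (SSA names illustrative), so the complex exponential is computed
   once instead of calling sinf and cosf separately.  */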

/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */

/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */

#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif

/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits, of the window used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3

/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */

static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };
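
/* For example, powi_table[15] == 9, so x**15 is evaluated as
   x**9 * x**6; unwinding the table entries gives the chain
   x**2, x**3, x**6, x**9, x**15 -- five multiplications, where plain
   square-and-multiply on the bits of 15 would use six.  */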


/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
         + powi_lookup_cost (powi_table[n], cache) + 1;
}

/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
        {
          digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
          result += powi_lookup_cost (digit, cache)
                    + POWI_WINDOW_SIZE + 1;
          val >>= POWI_WINDOW_SIZE;
        }
      else
        {
          val >>= 1;
          result++;
        }
    }

  return result + powi_lookup_cost (val, cache);
}
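
/* For example, powi_cost (257) is 9: 257 is odd, so the window method
   peels off the low POWI_WINDOW_SIZE bits (digit 1, already cached)
   at a cost of 3 + 1, leaving val == 32 with a table cost of 5
   (x**2, x**4, x**8, x**16, x**32); this corresponds to evaluating
   x**257 as (x**32)**8 * x.  */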

/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
                 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gassign *mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}

/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

static tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
               tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gassign *div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign (target, RDIV_EXPR,
                                  build_real (type, dconst1), result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}
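
/* For example, powi_as_mults (gsi, loc, x, -3) emits (temporary names
   illustrative)

	powmult_1 = x * x;
	powmult_2 = powmult_1 * x;
	powmult_3 = 1.0 / powmult_2;

   with the final division added because the exponent was negative.  */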

/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
                            tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
          || (optimize_function_for_speed_p (cfun)
              && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}

/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
                       tree fn, tree arg)
{
  gcall *call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}

/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name built from NAME.
   Insert the statement prior to GSI's current position, and
   return the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
                        const char *name, enum tree_code code,
                        tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

/* Build a gimple reference operation with the given CODE and argument
   ARG0, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static inline tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
                      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
                       tree type, tree val)
{
  tree result = make_ssa_name (type);
  gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

struct pow_synth_sqrt_info
{
  bool *factors;
  unsigned int deepest;
  unsigned int num_mults;
};

/* Return true iff the real value C can be represented as a
   sum of powers of 0.5 up to N.  That is:
   C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
   Record in INFO the various parameters of the synthesis algorithm such
   as the factors a[i], the maximum 0.5 power and the number of
   multiplications that will be required.  */

bool
representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
                                struct pow_synth_sqrt_info *info)
{
  REAL_VALUE_TYPE factor = dconsthalf;
  REAL_VALUE_TYPE remainder = c;

  info->deepest = 0;
  info->num_mults = 0;
  memset (info->factors, 0, n * sizeof (bool));

  for (unsigned i = 0; i < n; i++)
    {
      REAL_VALUE_TYPE res;

      /* If something inexact happened bail out now.  */
      if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
        return false;

      /* We have hit zero.  The number is representable as a sum
         of powers of 0.5.  */
      if (real_equal (&res, &dconst0))
        {
          info->factors[i] = true;
          info->deepest = i + 1;
          return true;
        }
      else if (!REAL_VALUE_NEGATIVE (res))
        {
          remainder = res;
          info->factors[i] = true;
          info->num_mults++;
        }
      else
        info->factors[i] = false;

      real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
    }
  return false;
}
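
/* For example, 0.625 == 0.5 + 0.125 == 0.5**1 + 0.5**3, so for
   C == 0.625 and N >= 3 this returns true with factors == {1, 0, 1},
   deepest == 3 and num_mults == 1: the fractional power can then be
   synthesized as sqrt (x) * sqrt (sqrt (sqrt (x))), a single
   multiplication of two square-root chains.  */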

/* Return the tree corresponding to FN being applied
   to ARG N times at GSI and LOC.
   Look up previous results from CACHE if need be.
   cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times.  */

static tree
get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
              tree fn, location_t loc, tree *cache)
{
  tree res = cache[n];
  if (!res)
    {
      tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
      res = build_and_insert_call (gsi, loc, fn, prev);
      cache[n] = res;
    }

  return res;
}

/* Print to STREAM the repeated application of function FNAME to ARG
   N times.  So, for FNAME = "foo", ARG = "x", N = 2 it would print:
   "foo (foo (x))".  */

static void
print_nested_fn (FILE* stream, const char *fname, const char* arg,
                 unsigned int n)
{
  if (n == 0)
    fprintf (stream, "%s", arg);
  else
    {
      fprintf (stream, "%s (", fname);
      print_nested_fn (stream, fname, arg, n - 1);
      fprintf (stream, ")");
    }
}

/* Print to STREAM the fractional sequence of sqrt chains
   applied to ARG, described by INFO.  Used for the dump file.  */

static void
dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
                               struct pow_synth_sqrt_info *info)
{
  for (unsigned int i = 0; i < info->deepest; i++)
    {
      bool is_set = info->factors[i];
      if (is_set)
        {
          print_nested_fn (stream, "sqrt", arg, i + 1);
          if (i != info->deepest - 1)
            fprintf (stream, " * ");
        }
    }
}

/* Print to STREAM a representation of raising ARG to an integer
   power N.  Used for the dump file.  */

static void
dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
{
  if (n > 1)
    fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
  else if (n == 1)
    fprintf (stream, "%s", arg);
}

/* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
   square roots.  Place at GSI and LOC.  Limit the maximum depth
   of the sqrt chains to MAX_DEPTH.  Return the tree holding the
   result of the expanded sequence or NULL_TREE if the expansion failed.

   This routine assumes that ARG1 is a real number with a fractional part
   (the integer exponent case will have been handled earlier in
   gimple_expand_builtin_pow).

   For ARG1 > 0.0:
   * For ARG1 composed of a whole part WHOLE_PART and a fractional part
     FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
     FRAC_PART == ARG1 - WHOLE_PART:
     Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
     POW (ARG0, FRAC_PART) is expanded as a product of square root chains
     if it can be expressed as such, that is if FRAC_PART satisfies:
     FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
     where integer a[i] is either 0 or 1.

     Example:
	POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
	  --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))

   For ARG1 < 0.0 there are two approaches:
   * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
	 is calculated as above.

     Example:
	POW (x, -5.625) == 1.0 / POW (x, 5.625)
	  --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))

   * (B) : WHOLE_PART := - ceil (abs (ARG1))
	   FRAC_PART := ARG1 - WHOLE_PART
	   and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
     Example:
	POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
	  --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))

   For ARG1 < 0.0 we choose between (A) and (B) depending on
   how many multiplications we'd have to do.
   So, for the example in (B): POW (x, -5.875), if we were to
   follow algorithm (A) we would produce:
   1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
   which contains more multiplications than approach (B).

   Hopefully, this approach will eliminate potentially expensive POW library
   calls when unsafe floating point math is enabled and allow the compiler to
   further optimize the multiplies, square roots and divides produced by this
   function.  */

static tree
expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
                     tree arg0, tree arg1, HOST_WIDE_INT max_depth)
{
  tree type = TREE_TYPE (arg0);
  machine_mode mode = TYPE_MODE (type);
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  bool one_over = true;

  if (!sqrtfn)
    return NULL_TREE;

  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);

  gcc_assert (max_depth > 0);
  tree *cache = XALLOCAVEC (tree, max_depth + 1);

  struct pow_synth_sqrt_info synth_info;
  synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
  synth_info.deepest = 0;
  synth_info.num_mults = 0;

  bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
  REAL_VALUE_TYPE exp = real_value_abs (&exp_init);

  /* The whole and fractional parts of exp.  */
  REAL_VALUE_TYPE whole_part;
  REAL_VALUE_TYPE frac_part;

  real_floor (&whole_part, mode, &exp);
  real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part);

  REAL_VALUE_TYPE ceil_whole = dconst0;
  REAL_VALUE_TYPE ceil_fract = dconst0;

  if (neg_exp)
    {
      real_ceil (&ceil_whole, mode, &exp);
      real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp);
    }

  if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
    return NULL_TREE;

  /* Check whether it's more profitable to not use 1.0 / ...  */
  if (neg_exp)
    {
      struct pow_synth_sqrt_info alt_synth_info;
      alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
      alt_synth_info.deepest = 0;
      alt_synth_info.num_mults = 0;

      if (representable_as_half_series_p (ceil_fract, max_depth,
                                          &alt_synth_info)
          && alt_synth_info.deepest <= synth_info.deepest
          && alt_synth_info.num_mults < synth_info.num_mults)
        {
          whole_part = ceil_whole;
          frac_part = ceil_fract;
          synth_info.deepest = alt_synth_info.deepest;
          synth_info.num_mults = alt_synth_info.num_mults;
          memcpy (synth_info.factors, alt_synth_info.factors,
                  (max_depth + 1) * sizeof (bool));
          one_over = false;
        }
    }

  HOST_WIDE_INT n = real_to_integer (&whole_part);
  REAL_VALUE_TYPE cint;
  real_from_integer (&cint, VOIDmode, n, SIGNED);

  if (!real_identical (&whole_part, &cint))
    return NULL_TREE;

  if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
    return NULL_TREE;

  memset (cache, 0, (max_depth + 1) * sizeof (tree));

  tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;

  /* Calculate the integer part of the exponent.  */
  if (n > 1)
    {
      integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
      if (!integer_res)
        return NULL_TREE;
    }

  if (dump_file)
    {
      char string[64];

      real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
      fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);

      if (neg_exp)
        {
          if (one_over)
            {
              fprintf (dump_file, "1.0 / (");
              dump_integer_part (dump_file, "x", n);
              if (n > 0)
                fprintf (dump_file, " * ");
              dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
              fprintf (dump_file, ")");
            }
          else
            {
              dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
              fprintf (dump_file, " / (");
              dump_integer_part (dump_file, "x", n);
              fprintf (dump_file, ")");
            }
        }
      else
        {
          dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
          if (n > 0)
            fprintf (dump_file, " * ");
          dump_integer_part (dump_file, "x", n);
        }

      fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
    }

  tree fract_res = NULL_TREE;
  cache[0] = arg0;

  /* Calculate the fractional part of the exponent.  */
  for (unsigned i = 0; i < synth_info.deepest; i++)
    {
      if (synth_info.factors[i])
        {
          tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);

          if (!fract_res)
            fract_res = sqrt_chain;
          else
            fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
                                                fract_res, sqrt_chain);
        }
    }

  tree res = NULL_TREE;

  if (neg_exp)
    {
      if (one_over)
        {
          if (n > 0)
            res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
                                          fract_res, integer_res);
          else
            res = fract_res;

          res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
                                        build_real (type, dconst1), res);
        }
      else
        {
          res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
                                        fract_res, integer_res);
        }
    }
  else
    res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
                                  fract_res, integer_res);
  return res;
}
1669
d24ad7d6
BS
1670/* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
1671 with location info LOC. If possible, create an equivalent and
1672 less expensive sequence of statements prior to GSI, and return an
1673   expression holding the result.  */
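1674
1675/* For example, pow (x, 2.0) becomes x * x; pow (x, 0.5) becomes
   sqrt (x) when signed zeros need not be honored; and under
   -funsafe-math-optimizations pow (x, 0.75) can become
   sqrt (x) * sqrt (sqrt (x)).  This is an illustration of the cases
   handled below, not an exhaustive list.  */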
1674
1675static tree
1676gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
1677 tree arg0, tree arg1)
1678{
b7dce216 1679 REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
6e96f98a 1680 REAL_VALUE_TYPE c2, dconst3;
d24ad7d6 1681 HOST_WIDE_INT n;
b7dce216 1682 tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
ef4bddc2 1683 machine_mode mode;
b7dce216 1684 bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
0bfbca58 1685 bool hw_sqrt_exists, c_is_int, c2_is_int;
d24ad7d6 1686
b7dce216
KT
1687 dconst1_4 = dconst1;
1688 SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
1689
d24ad7d6
BS
1690 /* If the exponent isn't a constant, there's nothing of interest
1691 to be done. */
1692 if (TREE_CODE (arg1) != REAL_CST)
1693 return NULL_TREE;
1694
5a00b0aa
SS
1695 /* Don't perform the operation if flag_signaling_nans is on
1696 and the operand is a signaling NaN. */
1697 if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))
942a1319
JJ
1698 && ((TREE_CODE (arg0) == REAL_CST
1699 && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0)))
5a00b0aa
SS
1700 || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1))))
1701 return NULL_TREE;
1702
ba869341
BS
1703 /* If the exponent is equivalent to an integer, expand to an optimal
1704 multiplication sequence when profitable. */
d24ad7d6
BS
1705 c = TREE_REAL_CST (arg1);
1706 n = real_to_integer (&c);
807e902e 1707 real_from_integer (&cint, VOIDmode, n, SIGNED);
0bfbca58 1708 c_is_int = real_identical (&c, &cint);
d24ad7d6 1709
0bfbca58 1710 if (c_is_int
d24ad7d6
BS
1711 && ((n >= -1 && n <= 2)
1712 || (flag_unsafe_math_optimizations
b7dce216 1713 && speed_p
d24ad7d6
BS
1714 && powi_cost (n) <= POWI_MAX_MULTS)))
1715 return gimple_expand_builtin_powi (gsi, loc, arg0, n);
1716
ba869341
BS
1717 /* Attempt various optimizations using sqrt and cbrt. */
1718 type = TREE_TYPE (arg0);
1719 mode = TYPE_MODE (type);
1720 sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1721
1722 /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe
1723 unless signed zeros must be maintained. pow(-0,0.5) = +0, while
1724 sqrt(-0) = -0. */
1725 if (sqrtfn
624d31fe 1726 && real_equal (&c, &dconsthalf)
ba869341 1727 && !HONOR_SIGNED_ZEROS (mode))
83d5977e 1728 return build_and_insert_call (gsi, loc, sqrtfn, arg0);
ba869341 1729
d7e2a1c1 1730 hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;
ba869341 1731
ba869341
BS
1732 /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math
1733 optimizations since 1./3. is not exactly representable. If x
1734 is negative and finite, the correct value of pow(x,1./3.) is
1735 a NaN with the "invalid" exception raised, because the value
1736 of 1./3. actually has an even denominator. The correct value
1737 of cbrt(x) is a negative real value. */
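  /* Concretely: cbrt (-8.0) is -2.0, whereas a conforming
     pow (-8.0, 1./3.) raises "invalid" and yields a NaN, because the
     rounded constant 1./3. is a binary fraction with an even
     denominator.  */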
1738 cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
1739 dconst1_3 = real_value_truncate (mode, dconst_third ());
1740
1741 if (flag_unsafe_math_optimizations
1742 && cbrtfn
68e57f04 1743 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
624d31fe 1744 && real_equal (&c, &dconst1_3))
83d5977e 1745 return build_and_insert_call (gsi, loc, cbrtfn, arg0);
ba869341
BS
1746
1747 /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization
1748 if we don't have a hardware sqrt insn. */
1749 dconst1_6 = dconst1_3;
1750 SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);
1751
1752 if (flag_unsafe_math_optimizations
1753 && sqrtfn
1754 && cbrtfn
68e57f04 1755 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
b7dce216 1756 && speed_p
ba869341 1757 && hw_sqrt_exists
624d31fe 1758 && real_equal (&c, &dconst1_6))
ba869341
BS
1759 {
1760 /* sqrt(x) */
83d5977e 1761 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
ba869341
BS
1762
1763 /* cbrt(sqrt(x)) */
83d5977e 1764 return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
6e96f98a
BS
1765 }
1766
6e96f98a 1767
b7dce216
KT
1768 /* Attempt to expand the POW as a product of square root chains.
1769   Expand the 0.25 case even when optimising for size.  */
6e96f98a
BS
1770 if (flag_unsafe_math_optimizations
1771 && sqrtfn
b7dce216 1772 && hw_sqrt_exists
624d31fe 1773 && (speed_p || real_equal (&c, &dconst1_4))
b7dce216 1774 && !HONOR_SIGNED_ZEROS (mode))
6e96f98a 1775 {
b7dce216
KT
1776 unsigned int max_depth = speed_p
1777 ? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
1778 : 2;
6e96f98a 1779
b7dce216
KT
1780 tree expand_with_sqrts
1781 = expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);
6e96f98a 1782
b7dce216
KT
1783 if (expand_with_sqrts)
1784 return expand_with_sqrts;
6e96f98a
BS
1785 }
1786
b7dce216
KT
1787 real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
1788 n = real_to_integer (&c2);
1789 real_from_integer (&cint, VOIDmode, n, SIGNED);
1790 c2_is_int = real_identical (&c2, &cint);
1791
6e96f98a
BS
1792 /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
1793
1794 powi(x, n/3) * powi(cbrt(x), n%3), n > 0;
1795 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0.
1796
1797 Do not calculate the first factor when n/3 = 0. As cbrt(x) is
1798 different from pow(x, 1./3.) due to rounding and behavior with
1799 negative x, we need to constrain this transformation to unsafe
1800 math and positive x or finite math. */
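  /* As an illustration, pow (x, 5./3.) has n = 5 and is synthesized as
     x * cbrt (x) * cbrt (x), while pow (x, -4./3.) has n = -4 and
     becomes 1.0 / (x * cbrt (x)).  */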
807e902e 1801 real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
6e96f98a
BS
1802 real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
1803 real_round (&c2, mode, &c2);
1804 n = real_to_integer (&c2);
807e902e 1805 real_from_integer (&cint, VOIDmode, n, SIGNED);
6e96f98a
BS
1806 real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
1807 real_convert (&c2, mode, &c2);
1808
1809 if (flag_unsafe_math_optimizations
1810 && cbrtfn
68e57f04 1811 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
6e96f98a 1812 && real_identical (&c2, &c)
0bfbca58 1813 && !c2_is_int
6e96f98a
BS
1814 && optimize_function_for_speed_p (cfun)
1815 && powi_cost (n / 3) <= POWI_MAX_MULTS)
1816 {
1817 tree powi_x_ndiv3 = NULL_TREE;
1818
1819 /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not
1820 possible or profitable, give up. Skip the degenerate case when
1821 abs(n) < 3, where the result is always 1. */
4c9cf7af 1822 if (absu_hwi (n) >= 3)
6e96f98a
BS
1823 {
1824 powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
9f813990 1825 abs_hwi (n / 3));
6e96f98a
BS
1826 if (!powi_x_ndiv3)
1827 return NULL_TREE;
1828 }
1829
1830 /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi
1831 as that creates an unnecessary variable. Instead, just produce
1832 either cbrt(x) or cbrt(x) * cbrt(x). */
83d5977e 1833 cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);
6e96f98a 1834
4c9cf7af 1835 if (absu_hwi (n) % 3 == 1)
6e96f98a
BS
1836 powi_cbrt_x = cbrt_x;
1837 else
83d5977e 1838 powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
6e96f98a
BS
1839 cbrt_x, cbrt_x);
1840
1841 /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */
4c9cf7af 1842 if (absu_hwi (n) < 3)
6e96f98a
BS
1843 result = powi_cbrt_x;
1844 else
83d5977e 1845 result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
6e96f98a
BS
1846 powi_x_ndiv3, powi_cbrt_x);
1847
1848 /* If n is negative, reciprocate the result. */
1849 if (n < 0)
83d5977e 1850 result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
6e96f98a
BS
1851 build_real (type, dconst1), result);
1852
1853 return result;
ba869341
BS
1854 }
1855
6e96f98a 1856 /* No optimizations succeeded. */
d24ad7d6
BS
1857 return NULL_TREE;
1858}
1859
d7e2a1c1
BS
1860/* ARG is the argument to a cabs builtin call in GSI with location info
1861 LOC. Create a sequence of statements prior to GSI that calculates
1862 sqrt(R*R + I*I), where R and I are the real and imaginary components
1863 of ARG, respectively. Return an expression holding the result. */
1864
1865static tree
1866gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
1867{
83d5977e 1868 tree real_part, imag_part, addend1, addend2, sum, result;
d7e2a1c1
BS
1869 tree type = TREE_TYPE (TREE_TYPE (arg));
1870 tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
ef4bddc2 1871 machine_mode mode = TYPE_MODE (type);
d7e2a1c1
BS
1872
1873 if (!flag_unsafe_math_optimizations
1874 || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
1875 || !sqrtfn
1876 || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
1877 return NULL_TREE;
1878
83d5977e 1879 real_part = build_and_insert_ref (gsi, loc, type, "cabs",
d7e2a1c1 1880 REALPART_EXPR, arg);
83d5977e 1881 addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
d7e2a1c1 1882 real_part, real_part);
83d5977e 1883 imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
d7e2a1c1 1884 IMAGPART_EXPR, arg);
83d5977e 1885 addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
d7e2a1c1 1886 imag_part, imag_part);
83d5977e
RG
1887 sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
1888 result = build_and_insert_call (gsi, loc, sqrtfn, sum);
d7e2a1c1
BS
1889
1890 return result;
1891}
1892
2f397a93 1893/* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
78be79d5
BS
1894 on the SSA_NAME argument of each of them. Also expand powi(x,n) into
1895 an optimal number of multiplies, when n is a constant. */
2f397a93 1896
be55bfe6
TS
1897namespace {
1898
1899const pass_data pass_data_cse_sincos =
1900{
1901 GIMPLE_PASS, /* type */
1902 "sincos", /* name */
1903 OPTGROUP_NONE, /* optinfo_flags */
1b6546cc 1904 TV_TREE_SINCOS, /* tv_id */
be55bfe6 1905 PROP_ssa, /* properties_required */
53f3cd25 1906 PROP_gimple_opt_math, /* properties_provided */
be55bfe6
TS
1907 0, /* properties_destroyed */
1908 0, /* todo_flags_start */
3bea341f 1909 TODO_update_ssa, /* todo_flags_finish */
be55bfe6
TS
1910};
1911
1912class pass_cse_sincos : public gimple_opt_pass
1913{
1914public:
1915 pass_cse_sincos (gcc::context *ctxt)
1916 : gimple_opt_pass (pass_data_cse_sincos, ctxt)
1917 {}
1918
1919 /* opt_pass methods: */
1920 virtual bool gate (function *)
1921 {
1922 /* We no longer require either sincos or cexp, since powi expansion
1923 piggybacks on this pass. */
1924 return optimize;
1925 }
1926
1927 virtual unsigned int execute (function *);
1928
1929}; // class pass_cse_sincos
1930
1931unsigned int
1932pass_cse_sincos::execute (function *fun)
2f397a93
RG
1933{
1934 basic_block bb;
90bc1cb8 1935 bool cfg_changed = false;
2f397a93
RG
1936
1937 calculate_dominance_info (CDI_DOMINATORS);
4da3b811 1938 memset (&sincos_stats, 0, sizeof (sincos_stats));
2f397a93 1939
be55bfe6 1940 FOR_EACH_BB_FN (bb, fun)
2f397a93 1941 {
726a989a 1942 gimple_stmt_iterator gsi;
3b9ee1cc 1943 bool cleanup_eh = false;
2f397a93 1944
726a989a 1945 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2f397a93 1946 {
355fe088 1947 gimple *stmt = gsi_stmt (gsi);
2f397a93 1948
3b9ee1cc
JJ
1949 /* Only the last stmt in a bb could throw, no need to call
1950 gimple_purge_dead_eh_edges if we change something in the middle
1951 of a basic block. */
1952 cleanup_eh = false;
1953
c97d1c9d 1954 if (is_gimple_call (stmt)
eada851c 1955 && gimple_call_lhs (stmt))
2f397a93 1956 {
78be79d5
BS
1957 tree arg, arg0, arg1, result;
1958 HOST_WIDE_INT n;
1959 location_t loc;
2f397a93 1960
c97d1c9d 1961 switch (gimple_call_combined_fn (stmt))
2f397a93 1962 {
c97d1c9d
RS
1963 CASE_CFN_COS:
1964 CASE_CFN_SIN:
1965 CASE_CFN_CEXPI:
fa65a9cf 1966 /* Make sure we have either sincos or cexp. */
d33d9e47
AI
1967 if (!targetm.libc_has_function (function_c99_math_complex)
1968 && !targetm.libc_has_function (function_sincos))
fa65a9cf
BS
1969 break;
1970
726a989a 1971 arg = gimple_call_arg (stmt, 0);
2f397a93 1972 if (TREE_CODE (arg) == SSA_NAME)
90bc1cb8 1973 cfg_changed |= execute_cse_sincos_1 (arg);
2f397a93
RG
1974 break;
1975
c97d1c9d 1976 CASE_CFN_POW:
d24ad7d6
BS
1977 arg0 = gimple_call_arg (stmt, 0);
1978 arg1 = gimple_call_arg (stmt, 1);
1979
1980 loc = gimple_location (stmt);
1981 result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);
1982
1983 if (result)
1984 {
1985 tree lhs = gimple_get_lhs (stmt);
538dd0b7 1986 gassign *new_stmt = gimple_build_assign (lhs, result);
d24ad7d6
BS
1987 gimple_set_location (new_stmt, loc);
1988 unlink_stmt_vdef (stmt);
1989 gsi_replace (&gsi, new_stmt, true);
3b9ee1cc 1990 cleanup_eh = true;
3d3f2249
RG
1991 if (gimple_vdef (stmt))
1992 release_ssa_name (gimple_vdef (stmt));
d24ad7d6
BS
1993 }
1994 break;
1995
c97d1c9d 1996 CASE_CFN_POWI:
78be79d5
BS
1997 arg0 = gimple_call_arg (stmt, 0);
1998 arg1 = gimple_call_arg (stmt, 1);
78be79d5 1999 loc = gimple_location (stmt);
0fa6e0ef 2000
e3530904 2001 if (real_minus_onep (arg0))
0fa6e0ef
TB
2002 {
2003 tree t0, t1, cond, one, minus_one;
538dd0b7 2004 gassign *stmt;
0fa6e0ef
TB
2005
2006 t0 = TREE_TYPE (arg0);
2007 t1 = TREE_TYPE (arg1);
2008 one = build_real (t0, dconst1);
2009 minus_one = build_real (t0, dconstm1);
2010
2011 cond = make_temp_ssa_name (t1, NULL, "powi_cond");
0d0e4a03
JJ
2012 stmt = gimple_build_assign (cond, BIT_AND_EXPR,
2013 arg1, build_int_cst (t1, 1));
0fa6e0ef
TB
2014 gimple_set_location (stmt, loc);
2015 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
2016
2017 result = make_temp_ssa_name (t0, NULL, "powi");
0d0e4a03
JJ
2018 stmt = gimple_build_assign (result, COND_EXPR, cond,
2019 minus_one, one);
0fa6e0ef
TB
2020 gimple_set_location (stmt, loc);
2021 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
2022 }
2023 else
2024 {
9541ffee 2025 if (!tree_fits_shwi_p (arg1))
daf4e940
TB
2026 break;
2027
eb1ce453 2028 n = tree_to_shwi (arg1);
0fa6e0ef
TB
2029 result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
2030 }
78be79d5
BS
2031
2032 if (result)
2033 {
2034 tree lhs = gimple_get_lhs (stmt);
538dd0b7 2035 gassign *new_stmt = gimple_build_assign (lhs, result);
78be79d5 2036 gimple_set_location (new_stmt, loc);
d7e2a1c1
BS
2037 unlink_stmt_vdef (stmt);
2038 gsi_replace (&gsi, new_stmt, true);
3b9ee1cc 2039 cleanup_eh = true;
3d3f2249
RG
2040 if (gimple_vdef (stmt))
2041 release_ssa_name (gimple_vdef (stmt));
d7e2a1c1
BS
2042 }
2043 break;
2044
c97d1c9d 2045 CASE_CFN_CABS:
d7e2a1c1
BS
2046 arg0 = gimple_call_arg (stmt, 0);
2047 loc = gimple_location (stmt);
2048 result = gimple_expand_builtin_cabs (&gsi, loc, arg0);
2049
2050 if (result)
2051 {
2052 tree lhs = gimple_get_lhs (stmt);
538dd0b7 2053 gassign *new_stmt = gimple_build_assign (lhs, result);
d7e2a1c1 2054 gimple_set_location (new_stmt, loc);
78be79d5
BS
2055 unlink_stmt_vdef (stmt);
2056 gsi_replace (&gsi, new_stmt, true);
3b9ee1cc 2057 cleanup_eh = true;
3d3f2249
RG
2058 if (gimple_vdef (stmt))
2059 release_ssa_name (gimple_vdef (stmt));
78be79d5
BS
2060 }
2061 break;
2062
2f397a93
RG
2063 default:;
2064 }
2065 }
2066 }
3b9ee1cc
JJ
2067 if (cleanup_eh)
2068 cfg_changed |= gimple_purge_dead_eh_edges (bb);
2f397a93
RG
2069 }
2070
be55bfe6 2071 statistics_counter_event (fun, "sincos statements inserted",
4da3b811
NF
2072 sincos_stats.inserted);
2073
90bc1cb8 2074 return cfg_changed ? TODO_cleanup_cfg : 0;
2f397a93
RG
2075}
2076
27a4cd48
DM
2077} // anon namespace
2078
2079gimple_opt_pass *
2080make_pass_cse_sincos (gcc::context *ctxt)
2081{
2082 return new pass_cse_sincos (ctxt);
2083}
2084
7ab6a828
RE
2085/* Return true if stmt is a type conversion operation that can be stripped
2086 when used in a widening multiply operation. */
2087static bool
355fe088 2088widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
7ab6a828
RE
2089{
2090 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
2091
2092 if (TREE_CODE (result_type) == INTEGER_TYPE)
2093 {
2094 tree op_type;
2095 tree inner_op_type;
2096
2097 if (!CONVERT_EXPR_CODE_P (rhs_code))
2098 return false;
2099
2100 op_type = TREE_TYPE (gimple_assign_lhs (stmt));
2101
2102 /* If the type of OP has the same precision as the result, then
2103 we can strip this conversion. The multiply operation will be
2104 selected to create the correct extension as a by-product. */
2105 if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
2106 return true;
2107
2108 /* We can also strip a conversion if it preserves the signed-ness of
2109 the operation and doesn't narrow the range. */
2110 inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
2111
e919e5bf
RE
2112 /* If the inner-most type is unsigned, then we can strip any
2113 intermediate widening operation. If it's signed, then the
2114 intermediate widening operation must also be signed. */
2115 if ((TYPE_UNSIGNED (inner_op_type)
2116 || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
7ab6a828
RE
2117 && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
2118 return true;
2119
2120 return false;
2121 }
2122
2123 return rhs_code == FIXED_CONVERT_EXPR;
2124}
2125
26a855d7
AS
2126/* Return true if RHS is a suitable operand for a widening multiplication,
2127 assuming a target type of TYPE.
1a39adae
RS
2128 There are two cases:
2129
5dfe80ba
AS
2130 - RHS makes some value at least twice as wide. Store that value
2131 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
1a39adae
RS
2132
2133 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
2134 but leave *TYPE_OUT untouched. */
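
/* For instance, with TYPE a 32-bit int and RHS defined by
   tmp_1 = (int) s_2 for a 16-bit short s_2, *NEW_RHS_OUT is s_2 and
   *TYPE_OUT is short; for an INTEGER_CST RHS such as 7, *NEW_RHS_OUT
   is 7 and *TYPE_OUT is left untouched.  (Illustrative example.)  */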
0354c0c7
BS
2135
2136static bool
26a855d7
AS
2137is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
2138 tree *new_rhs_out)
1a39adae 2139{
355fe088 2140 gimple *stmt;
26a855d7 2141 tree type1, rhs1;
1a39adae
RS
2142
2143 if (TREE_CODE (rhs) == SSA_NAME)
2144 {
1a39adae 2145 stmt = SSA_NAME_DEF_STMT (rhs);
26a855d7
AS
2146 if (is_gimple_assign (stmt))
2147 {
7ab6a828 2148 if (! widening_mult_conversion_strippable_p (type, stmt))
26a855d7
AS
2149 rhs1 = rhs;
2150 else
a6f969f4
AS
2151 {
2152 rhs1 = gimple_assign_rhs1 (stmt);
2153
2154 if (TREE_CODE (rhs1) == INTEGER_CST)
2155 {
2156 *new_rhs_out = rhs1;
2157 *type_out = NULL;
2158 return true;
2159 }
2160 }
26a855d7
AS
2161 }
2162 else
2163 rhs1 = rhs;
1a39adae 2164
1a39adae 2165 type1 = TREE_TYPE (rhs1);
26a855d7 2166
1a39adae 2167 if (TREE_CODE (type1) != TREE_CODE (type)
5dfe80ba 2168 || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
1a39adae
RS
2169 return false;
2170
2171 *new_rhs_out = rhs1;
2172 *type_out = type1;
2173 return true;
2174 }
2175
2176 if (TREE_CODE (rhs) == INTEGER_CST)
2177 {
2178 *new_rhs_out = rhs;
2179 *type_out = NULL;
2180 return true;
2181 }
2182
2183 return false;
2184}
2185
26a855d7
AS
2186/* Return true if STMT performs a widening multiplication, assuming the
2187 output type is TYPE. If so, store the unwidened types of the operands
2188 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
2189 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
2190 and *TYPE2_OUT would give the operands of the multiplication. */
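
/* For instance, given 32-bit ints a_1 and b_2, a 64-bit long lhs, and
   the statements
     t1_3 = (long) a_1;
     t2_4 = (long) b_2;
     lhs_5 = t1_3 * t2_4;
   this returns true for the multiplication, with *TYPE1_OUT and
   *TYPE2_OUT both int, *RHS1_OUT = a_1 and *RHS2_OUT = b_2.
   (Illustrative example.)  */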
1a39adae
RS
2191
2192static bool
355fe088 2193is_widening_mult_p (gimple *stmt,
1a39adae
RS
2194 tree *type1_out, tree *rhs1_out,
2195 tree *type2_out, tree *rhs2_out)
0354c0c7 2196{
3d71881d
AS
2197 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
2198
9b8e85a5
JJ
2199 if (TREE_CODE (type) == INTEGER_TYPE)
2200 {
2201 if (TYPE_OVERFLOW_TRAPS (type))
2202 return false;
2203 }
2204 else if (TREE_CODE (type) != FIXED_POINT_TYPE)
1a39adae 2205 return false;
0354c0c7 2206
26a855d7
AS
2207 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
2208 rhs1_out))
0354c0c7
BS
2209 return false;
2210
26a855d7
AS
2211 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
2212 rhs2_out))
1a39adae 2213 return false;
0354c0c7 2214
1a39adae 2215 if (*type1_out == NULL)
0354c0c7 2216 {
1a39adae 2217 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
0354c0c7 2218 return false;
1a39adae 2219 *type1_out = *type2_out;
0354c0c7 2220 }
0354c0c7 2221
1a39adae 2222 if (*type2_out == NULL)
0354c0c7 2223 {
1a39adae 2224 if (!int_fits_type_p (*rhs2_out, *type1_out))
0354c0c7 2225 return false;
1a39adae 2226 *type2_out = *type1_out;
0354c0c7 2227 }
0354c0c7 2228
ff63d754
AS
2229 /* Ensure that the larger of the two operands comes first. */
2230 if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
2231 {
fab27f52
MM
2232 std::swap (*type1_out, *type2_out);
2233 std::swap (*rhs1_out, *rhs2_out);
ff63d754 2234 }
5dfe80ba 2235
1a39adae
RS
2236 return true;
2237}
0354c0c7 2238
336a06a1
TC
2239/* Check to see if the CALL statement is an invocation of copysign
2240   with 1. as its first argument.  */
2241static bool
2242is_copysign_call_with_1 (gimple *call)
2243{
2244 gcall *c = dyn_cast <gcall *> (call);
2245 if (! c)
2246 return false;
2247
2248 enum combined_fn code = gimple_call_combined_fn (c);
2249
2250 if (code == CFN_LAST)
2251 return false;
2252
2253 if (builtin_fn_p (code))
2254 {
2255 switch (as_builtin_fn (code))
2256 {
2257 CASE_FLT_FN (BUILT_IN_COPYSIGN):
2258 CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN):
2259 return real_onep (gimple_call_arg (c, 0));
2260 default:
2261 return false;
2262 }
2263 }
2264
2265 if (internal_fn_p (code))
2266 {
2267 switch (as_internal_fn (code))
2268 {
2269 case IFN_COPYSIGN:
2270 return real_onep (gimple_call_arg (c, 0));
2271 default:
2272 return false;
2273 }
2274 }
2275
2276 return false;
2277}
2278
2279/* Try to expand the pattern x * copysign (1, y) into xorsign (x, y).
2280   This only happens when the xorsign optab is defined; if the
2281   pattern is not a xorsign pattern or if expansion fails, FALSE is
2282   returned, otherwise TRUE is returned.  */
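/* For instance, for float x_1 and y_2 the sequence
     t_3 = __builtin_copysignf (1.0, y_2);
     z_4 = x_1 * t_3;
   is rewritten to z_4 = .XORSIGN (x_1, y_2) when the target provides
   the xorsign optab.  (Illustrative example.)  */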
2283static bool
2284convert_expand_mult_copysign (gimple *stmt, gimple_stmt_iterator *gsi)
2285{
2286 tree treeop0, treeop1, lhs, type;
2287 location_t loc = gimple_location (stmt);
2288 lhs = gimple_assign_lhs (stmt);
2289 treeop0 = gimple_assign_rhs1 (stmt);
2290 treeop1 = gimple_assign_rhs2 (stmt);
2291 type = TREE_TYPE (lhs);
2292 machine_mode mode = TYPE_MODE (type);
2293
9880acc1 2294 if (HONOR_SNANS (type))
336a06a1
TC
2295 return false;
2296
2297 if (TREE_CODE (treeop0) == SSA_NAME && TREE_CODE (treeop1) == SSA_NAME)
2298 {
2299 gimple *call0 = SSA_NAME_DEF_STMT (treeop0);
9880acc1 2300 if (!has_single_use (treeop0) || !is_copysign_call_with_1 (call0))
336a06a1
TC
2301 {
2302 call0 = SSA_NAME_DEF_STMT (treeop1);
9880acc1 2303 if (!has_single_use (treeop1) || !is_copysign_call_with_1 (call0))
336a06a1
TC
2304 return false;
2305
2306 treeop1 = treeop0;
2307 }
336a06a1
TC
2308 if (optab_handler (xorsign_optab, mode) == CODE_FOR_nothing)
2309 return false;
2310
2311 gcall *c = as_a<gcall*> (call0);
2312 treeop0 = gimple_call_arg (c, 1);
2313
2314 gcall *call_stmt
2315 = gimple_build_call_internal (IFN_XORSIGN, 2, treeop1, treeop0);
2316 gimple_set_lhs (call_stmt, lhs);
2317 gimple_set_location (call_stmt, loc);
2318 gsi_replace (gsi, call_stmt, true);
2319 return true;
2320 }
2321
2322 return false;
2323}
2324
1a39adae
RS
2325/* Process a single gimple statement STMT, which has a MULT_EXPR as
2326 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
2327 value is true iff we converted the statement. */
2328
2329static bool
355fe088 2330convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
1a39adae 2331{
83d5977e 2332 tree lhs, rhs1, rhs2, type, type1, type2;
1a39adae 2333 enum insn_code handler;
4b926fea 2334 scalar_int_mode to_mode, from_mode, actual_mode;
a484f6ba 2335 optab op;
5dfe80ba
AS
2336 int actual_precision;
2337 location_t loc = gimple_location (stmt);
db719f50 2338 bool from_unsigned1, from_unsigned2;
1a39adae
RS
2339
2340 lhs = gimple_assign_lhs (stmt);
2341 type = TREE_TYPE (lhs);
2342 if (TREE_CODE (type) != INTEGER_TYPE)
0354c0c7
BS
2343 return false;
2344
3d71881d 2345 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
0354c0c7
BS
2346 return false;
2347
7a504f33
RS
2348 to_mode = SCALAR_INT_TYPE_MODE (type);
2349 from_mode = SCALAR_INT_TYPE_MODE (type1);
9134df2c
RS
2350 if (to_mode == from_mode)
2351 return false;
2352
db719f50
AS
2353 from_unsigned1 = TYPE_UNSIGNED (type1);
2354 from_unsigned2 = TYPE_UNSIGNED (type2);
a484f6ba 2355
db719f50 2356 if (from_unsigned1 && from_unsigned2)
a484f6ba 2357 op = umul_widen_optab;
db719f50 2358 else if (!from_unsigned1 && !from_unsigned2)
a484f6ba 2359 op = smul_widen_optab;
0354c0c7 2360 else
a484f6ba
AS
2361 op = usmul_widen_optab;
2362
5dfe80ba 2363 handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
4b926fea 2364 &actual_mode);
1a39adae
RS
2365
2366 if (handler == CODE_FOR_nothing)
db719f50
AS
2367 {
2368 if (op != smul_widen_optab)
2369 {
6a228c2c
AS
2370 /* We can use a signed multiply with unsigned types as long as
2371 there is a wider mode to use, or it is the smaller of the two
2372 types that is unsigned. Note that type1 >= type2, always. */
2373 if ((TYPE_UNSIGNED (type1)
2374 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2375 || (TYPE_UNSIGNED (type2)
2376 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2377 {
490d0f6c
RS
2378 if (!GET_MODE_WIDER_MODE (from_mode).exists (&from_mode)
2379 || GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
6a228c2c
AS
2380 return false;
2381 }
db719f50
AS
2382
2383 op = smul_widen_optab;
2384 handler = find_widening_optab_handler_and_mode (op, to_mode,
4b926fea 2385 from_mode,
db719f50
AS
2386 &actual_mode);
2387
2388 if (handler == CODE_FOR_nothing)
2389 return false;
2390
2391 from_unsigned1 = from_unsigned2 = false;
2392 }
2393 else
2394 return false;
2395 }
1a39adae 2396
5dfe80ba
AS
2397   /* Ensure that the inputs to the handler are in the correct precision
2398 for the opcode. This will be the full mode size. */
2399 actual_precision = GET_MODE_PRECISION (actual_mode);
f409d239
RG
2400 if (2 * actual_precision > TYPE_PRECISION (type))
2401 return false;
db719f50
AS
2402 if (actual_precision != TYPE_PRECISION (type1)
2403 || from_unsigned1 != TYPE_UNSIGNED (type1))
83d5977e
RG
2404 rhs1 = build_and_insert_cast (gsi, loc,
2405 build_nonstandard_integer_type
2406 (actual_precision, from_unsigned1), rhs1);
db719f50
AS
2407 if (actual_precision != TYPE_PRECISION (type2)
2408 || from_unsigned2 != TYPE_UNSIGNED (type2))
83d5977e
RG
2409 rhs2 = build_and_insert_cast (gsi, loc,
2410 build_nonstandard_integer_type
2411 (actual_precision, from_unsigned2), rhs2);
5dfe80ba 2412
a6f969f4
AS
2413 /* Handle constants. */
2414 if (TREE_CODE (rhs1) == INTEGER_CST)
2415 rhs1 = fold_convert (type1, rhs1);
2416 if (TREE_CODE (rhs2) == INTEGER_CST)
2417 rhs2 = fold_convert (type2, rhs2);
2418
5dfe80ba
AS
2419 gimple_assign_set_rhs1 (stmt, rhs1);
2420 gimple_assign_set_rhs2 (stmt, rhs2);
0354c0c7
BS
2421 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
2422 update_stmt (stmt);
4da3b811 2423 widen_mul_stats.widen_mults_inserted++;
0354c0c7
BS
2424 return true;
2425}
2426
2427/* Process a single gimple statement STMT, which is found at the
2428 iterator GSI and has a either a PLUS_EXPR or a MINUS_EXPR as its
2429 rhs (given by CODE), and try to convert it into a
2430 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
2431 is true iff we converted the statement. */
2432
2433static bool
355fe088 2434convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
0354c0c7
BS
2435 enum tree_code code)
2436{
355fe088
TS
2437 gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
2438 gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
83d5977e 2439 tree type, type1, type2, optype;
0354c0c7
BS
2440 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
2441 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
2442 optab this_optab;
2443 enum tree_code wmult_code;
5dfe80ba 2444 enum insn_code handler;
4b926fea 2445 scalar_mode to_mode, from_mode, actual_mode;
5dfe80ba
AS
2446 location_t loc = gimple_location (stmt);
2447 int actual_precision;
db719f50 2448 bool from_unsigned1, from_unsigned2;
0354c0c7
BS
2449
2450 lhs = gimple_assign_lhs (stmt);
2451 type = TREE_TYPE (lhs);
1a39adae
RS
2452 if (TREE_CODE (type) != INTEGER_TYPE
2453 && TREE_CODE (type) != FIXED_POINT_TYPE)
0354c0c7
BS
2454 return false;
2455
2456 if (code == MINUS_EXPR)
2457 wmult_code = WIDEN_MULT_MINUS_EXPR;
2458 else
2459 wmult_code = WIDEN_MULT_PLUS_EXPR;
2460
0354c0c7
BS
2461 rhs1 = gimple_assign_rhs1 (stmt);
2462 rhs2 = gimple_assign_rhs2 (stmt);
2463
2464 if (TREE_CODE (rhs1) == SSA_NAME)
2465 {
2466 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2467 if (is_gimple_assign (rhs1_stmt))
2468 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2469 }
0354c0c7
BS
2470
2471 if (TREE_CODE (rhs2) == SSA_NAME)
2472 {
2473 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2474 if (is_gimple_assign (rhs2_stmt))
2475 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2476 }
0354c0c7 2477
cefb4d4f
AS
2478 /* Allow for one conversion statement between the multiply
2479 and addition/subtraction statement. If there are more than
2480 one conversions then we assume they would invalidate this
2481 transformation. If that's not the case then they should have
2482 been folded before now. */
2483 if (CONVERT_EXPR_CODE_P (rhs1_code))
2484 {
2485 conv1_stmt = rhs1_stmt;
2486 rhs1 = gimple_assign_rhs1 (rhs1_stmt);
2487 if (TREE_CODE (rhs1) == SSA_NAME)
2488 {
2489 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2490 if (is_gimple_assign (rhs1_stmt))
2491 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2492 }
2493 else
2494 return false;
2495 }
2496 if (CONVERT_EXPR_CODE_P (rhs2_code))
2497 {
2498 conv2_stmt = rhs2_stmt;
2499 rhs2 = gimple_assign_rhs1 (rhs2_stmt);
2500 if (TREE_CODE (rhs2) == SSA_NAME)
2501 {
2502 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2503 if (is_gimple_assign (rhs2_stmt))
2504 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2505 }
2506 else
2507 return false;
2508 }
2509
5dfe80ba
AS
2510 /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
2511      is_widening_mult_p, but we still need the returned rhs values.
2512
2513 It might also appear that it would be sufficient to use the existing
2514 operands of the widening multiply, but that would limit the choice of
42917d01
YZ
2515 multiply-and-accumulate instructions.
2516
2517      If the widened-multiplication result has more than one use, it is
2518 probably wiser not to do the conversion. */
5dfe80ba
AS
2519 if (code == PLUS_EXPR
2520 && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
0354c0c7 2521 {
42917d01
YZ
2522 if (!has_single_use (rhs1)
2523 || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
2524 &type2, &mult_rhs2))
0354c0c7 2525 return false;
1a39adae 2526 add_rhs = rhs2;
cefb4d4f 2527 conv_stmt = conv1_stmt;
0354c0c7 2528 }
5dfe80ba 2529 else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
0354c0c7 2530 {
42917d01
YZ
2531 if (!has_single_use (rhs2)
2532 || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
2533 &type2, &mult_rhs2))
0354c0c7 2534 return false;
1a39adae 2535 add_rhs = rhs1;
cefb4d4f 2536 conv_stmt = conv2_stmt;
0354c0c7 2537 }
0354c0c7
BS
2538 else
2539 return false;
2540
b397965c
RS
2541 to_mode = SCALAR_TYPE_MODE (type);
2542 from_mode = SCALAR_TYPE_MODE (type1);
9134df2c
RS
2543 if (to_mode == from_mode)
2544 return false;
2545
db719f50
AS
2546 from_unsigned1 = TYPE_UNSIGNED (type1);
2547 from_unsigned2 = TYPE_UNSIGNED (type2);
3752b2ab 2548 optype = type1;
5dfe80ba 2549
db719f50
AS
2550 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
2551 if (from_unsigned1 != from_unsigned2)
2552 {
3752b2ab
RS
2553 if (!INTEGRAL_TYPE_P (type))
2554 return false;
6a228c2c
AS
2555 /* We can use a signed multiply with unsigned types as long as
2556 there is a wider mode to use, or it is the smaller of the two
2557 types that is unsigned. Note that type1 >= type2, always. */
2558 if ((from_unsigned1
2559 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2560 || (from_unsigned2
2561 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
db719f50 2562 {
490d0f6c
RS
2563 if (!GET_MODE_WIDER_MODE (from_mode).exists (&from_mode)
2564 || GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
6a228c2c 2565 return false;
db719f50 2566 }
6a228c2c
AS
2567
2568 from_unsigned1 = from_unsigned2 = false;
3752b2ab
RS
2569 optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
2570 false);
db719f50 2571 }
9eab7f91 2572
cefb4d4f
AS
2573 /* If there was a conversion between the multiply and addition
2574 then we need to make sure it fits a multiply-and-accumulate.
2575      There should be a single mode change which does not change the
2576 value. */
2577 if (conv_stmt)
2578 {
db719f50 2579 /* We use the original, unmodified data types for this. */
cefb4d4f
AS
2580 tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
2581 tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
2582 int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
2583 bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
2584
2585 if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
2586 {
2587 /* Conversion is a truncate. */
2588 if (TYPE_PRECISION (to_type) < data_size)
2589 return false;
2590 }
2591 else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
2592 {
2593 /* Conversion is an extend. Check it's the right sort. */
2594 if (TYPE_UNSIGNED (from_type) != is_unsigned
2595 && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
2596 return false;
2597 }
2598 /* else convert is a no-op for our purposes. */
2599 }
2600
9eab7f91
RS
2601 /* Verify that the machine can perform a widening multiply
2602 accumulate in this mode/signedness combination, otherwise
2603 this transformation is likely to pessimize code. */
db719f50 2604 this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
5dfe80ba 2605 handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
4b926fea 2606 from_mode, &actual_mode);
5dfe80ba
AS
2607
2608 if (handler == CODE_FOR_nothing)
9eab7f91
RS
2609 return false;
2610
5dfe80ba
AS
2611 /* Ensure that the inputs to the handler are in the correct precison
2612 for the opcode. This will be the full mode size. */
2613 actual_precision = GET_MODE_PRECISION (actual_mode);
db719f50
AS
2614 if (actual_precision != TYPE_PRECISION (type1)
2615 || from_unsigned1 != TYPE_UNSIGNED (type1))
83d5977e
RG
2616 mult_rhs1 = build_and_insert_cast (gsi, loc,
2617 build_nonstandard_integer_type
2618 (actual_precision, from_unsigned1),
2619 mult_rhs1);
db719f50
AS
2620 if (actual_precision != TYPE_PRECISION (type2)
2621 || from_unsigned2 != TYPE_UNSIGNED (type2))
83d5977e
RG
2622 mult_rhs2 = build_and_insert_cast (gsi, loc,
2623 build_nonstandard_integer_type
2624 (actual_precision, from_unsigned2),
2625 mult_rhs2);
0354c0c7 2626
75161d2c 2627 if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
83d5977e 2628 add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
75161d2c 2629
a6f969f4
AS
2630 /* Handle constants. */
2631 if (TREE_CODE (mult_rhs1) == INTEGER_CST)
c3c5a1cc 2632 mult_rhs1 = fold_convert (type1, mult_rhs1);
a6f969f4 2633 if (TREE_CODE (mult_rhs2) == INTEGER_CST)
c3c5a1cc 2634 mult_rhs2 = fold_convert (type2, mult_rhs2);
a6f969f4 2635
00d66391
JJ
2636 gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
2637 add_rhs);
0354c0c7 2638 update_stmt (gsi_stmt (*gsi));
4da3b811 2639 widen_mul_stats.maccs_inserted++;
0354c0c7
BS
2640 return true;
2641}
2642
4a0d0ed2
MJ
2643/* Given a result MUL_RESULT which is a result of a multiplication of OP1 and
2644 OP2 and which we know is used in statements that can be, together with the
2645 multiplication, converted to FMAs, perform the transformation. */
2646
2647static void
2648convert_mult_to_fma_1 (tree mul_result, tree op1, tree op2)
2649{
2650 tree type = TREE_TYPE (mul_result);
2651 gimple *use_stmt;
2652 imm_use_iterator imm_iter;
c566cc9f 2653 gcall *fma_stmt;
4a0d0ed2
MJ
2654
2655 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
2656 {
2657 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
4a0d0ed2
MJ
2658 tree addop, mulop1 = op1, result = mul_result;
2659 bool negate_p = false;
c566cc9f 2660 gimple_seq seq = NULL;
4a0d0ed2
MJ
2661
2662 if (is_gimple_debug (use_stmt))
2663 continue;
2664
0936858f
RS
2665 if (is_gimple_assign (use_stmt)
2666 && gimple_assign_rhs_code (use_stmt) == NEGATE_EXPR)
4a0d0ed2
MJ
2667 {
2668 result = gimple_assign_lhs (use_stmt);
2669 use_operand_p use_p;
2670 gimple *neguse_stmt;
2671 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
2672 gsi_remove (&gsi, true);
2673 release_defs (use_stmt);
2674
2675 use_stmt = neguse_stmt;
2676 gsi = gsi_for_stmt (use_stmt);
4a0d0ed2
MJ
2677 negate_p = true;
2678 }
2679
0936858f
RS
2680 tree cond, else_value, ops[3];
2681 tree_code code;
2682 if (!can_interpret_as_conditional_op_p (use_stmt, &cond, &code,
2683 ops, &else_value))
2684 gcc_unreachable ();
2685 addop = ops[0] == result ? ops[1] : ops[0];
2686
2687 if (code == MINUS_EXPR)
4a0d0ed2 2688 {
0936858f
RS
2689 if (ops[0] == result)
2690 /* a * b - c -> a * b + (-c) */
c566cc9f 2691 addop = gimple_build (&seq, NEGATE_EXPR, type, addop);
0936858f
RS
2692 else
2693 /* a - b * c -> (-b) * c + a */
4a0d0ed2
MJ
2694 negate_p = !negate_p;
2695 }
2696
2697 if (negate_p)
c566cc9f 2698 mulop1 = gimple_build (&seq, NEGATE_EXPR, type, mulop1);
4a0d0ed2 2699
c566cc9f
RS
2700 if (seq)
2701 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
0936858f
RS
2702
2703 if (cond)
2704 fma_stmt = gimple_build_call_internal (IFN_COND_FMA, 5, cond, mulop1,
2705 op2, addop, else_value);
2706 else
2707 fma_stmt = gimple_build_call_internal (IFN_FMA, 3, mulop1, op2, addop);
2708 gimple_set_lhs (fma_stmt, gimple_get_lhs (use_stmt));
c566cc9f
RS
2709 gimple_call_set_nothrow (fma_stmt, !stmt_can_throw_internal (use_stmt));
2710 gsi_replace (&gsi, fma_stmt, true);
2711 /* Follow all SSA edges so that we generate FMS, FNMA and FNMS
2712 regardless of where the negation occurs. */
2713 if (fold_stmt (&gsi, follow_all_ssa_edges))
2714 update_stmt (gsi_stmt (gsi));
4a0d0ed2
MJ
2715
2716 if (dump_file && (dump_flags & TDF_DETAILS))
2717 {
2718 fprintf (dump_file, "Generated FMA ");
4af78ef8 2719 print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, TDF_NONE);
4a0d0ed2
MJ
2720 fprintf (dump_file, "\n");
2721 }
2722
4a0d0ed2
MJ
2723 widen_mul_stats.fmas_inserted++;
2724 }
2725}
2726
2727/* Data necessary to perform the actual transformation from a multiplication
2728 and an addition to an FMA after decision is taken it should be done and to
2729 then delete the multiplication statement from the function IL. */
2730
2731struct fma_transformation_info
2732{
2733 gimple *mul_stmt;
2734 tree mul_result;
2735 tree op1;
2736 tree op2;
2737};
2738
2739/* Structure containing the current state of FMA deferring, i.e. whether we are
2740 deferring, whether to continue deferring, and all data necessary to come
2741 back and perform all deferred transformations. */
2742
2743class fma_deferring_state
2744{
2745public:
2746 /* Class constructor. Pass true as PERFORM_DEFERRING in order to actually
2747 do any deferring. */
2748
2749 fma_deferring_state (bool perform_deferring)
2750 : m_candidates (), m_mul_result_set (), m_initial_phi (NULL),
2751 m_last_result (NULL_TREE), m_deferring_p (perform_deferring) {}
2752
2753   /* List of FMA candidates for which the transformation has been determined
2754      possible but which, at this point in the BB analysis, we do not consider
2755      beneficial.  */
2756 auto_vec<fma_transformation_info, 8> m_candidates;
2757
2758 /* Set of results of multiplication that are part of an already deferred FMA
2759 candidates. */
2760 hash_set<tree> m_mul_result_set;
2761
2762   /* The PHI that supposedly feeds back the result of one FMA to another
2763      across a loop boundary.  */
2764 gphi *m_initial_phi;
2765
2766 /* Result of the last produced FMA candidate or NULL if there has not been
2767 one. */
2768 tree m_last_result;
2769
2770 /* If true, deferring might still be profitable. If false, transform all
2771 candidates and no longer defer. */
2772 bool m_deferring_p;
2773};
2774
2775/* Transform all deferred FMA candidates and mark STATE as no longer
2776 deferring. */
2777
2778static void
2779cancel_fma_deferring (fma_deferring_state *state)
2780{
2781 if (!state->m_deferring_p)
2782 return;
2783
2784 for (unsigned i = 0; i < state->m_candidates.length (); i++)
2785 {
2786 if (dump_file && (dump_flags & TDF_DETAILS))
2787 fprintf (dump_file, "Generating deferred FMA\n");
2788
2789 const fma_transformation_info &fti = state->m_candidates[i];
2790 convert_mult_to_fma_1 (fti.mul_result, fti.op1, fti.op2);
2791
2792 gimple_stmt_iterator gsi = gsi_for_stmt (fti.mul_stmt);
2793 gsi_remove (&gsi, true);
2794 release_defs (fti.mul_stmt);
2795 }
2796 state->m_deferring_p = false;
2797}
2798
2799/* If OP is an SSA name defined by a PHI node, return the PHI statement.
2800 Otherwise return NULL. */
2801
2802static gphi *
2803result_of_phi (tree op)
2804{
2805 if (TREE_CODE (op) != SSA_NAME)
2806 return NULL;
2807
2808 return dyn_cast <gphi *> (SSA_NAME_DEF_STMT (op));
2809}
2810
2811/* After processing statements of a BB and recording STATE, return true if the
2812   initial phi is fed by the last FMA candidate result or one such result from
2813 previously processed BBs marked in LAST_RESULT_SET. */
2814
2815static bool
2816last_fma_candidate_feeds_initial_phi (fma_deferring_state *state,
2817 hash_set<tree> *last_result_set)
2818{
2819 ssa_op_iter iter;
2820 use_operand_p use;
2821 FOR_EACH_PHI_ARG (use, state->m_initial_phi, iter, SSA_OP_USE)
2822 {
2823 tree t = USE_FROM_PTR (use);
2824 if (t == state->m_last_result
2825 || last_result_set->contains (t))
2826 return true;
2827 }
2828
2829 return false;
2830}
2831
4dbed5f6
RG
2832/* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
2833 with uses in additions and subtractions to form fused multiply-add
4a0d0ed2
MJ
2834 operations. Returns true if successful and MUL_STMT should be removed.
2835
2836 If STATE indicates that we are deferring FMA transformation, that means
2837 that we do not produce FMAs for basic blocks which look like:
2838
2839 <bb 6>
2840 # accumulator_111 = PHI <0.0(5), accumulator_66(6)>
2841 _65 = _14 * _16;
2842 accumulator_66 = _65 + accumulator_111;
2843
2844   or its unrolled version, i.e. with several FMA candidates that feed the result
2845 of one into the addend of another. Instead, we add them to a list in STATE
2846 and if we later discover an FMA candidate that is not part of such a chain,
2847 we go back and perform all deferred past candidates. */
16949072
RG
2848
2849static bool
4a0d0ed2
MJ
2850convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2,
2851 fma_deferring_state *state)
16949072 2852{
4dbed5f6 2853 tree mul_result = gimple_get_lhs (mul_stmt);
16949072 2854 tree type = TREE_TYPE (mul_result);
355fe088 2855 gimple *use_stmt, *neguse_stmt;
16949072
RG
2856 use_operand_p use_p;
2857 imm_use_iterator imm_iter;
2858
2859 if (FLOAT_TYPE_P (type)
2860 && flag_fp_contract_mode == FP_CONTRACT_OFF)
2861 return false;
2862
2863 /* We don't want to do bitfield reduction ops. */
2864 if (INTEGRAL_TYPE_P (type)
9b8e85a5 2865 && (!type_has_mode_precision_p (type) || TYPE_OVERFLOW_TRAPS (type)))
16949072
RG
2866 return false;
2867
2868 /* If the target doesn't support it, don't generate it. We assume that
2869 if fma isn't available then fms, fnma or fnms are not either. */
c566cc9f
RS
2870 optimization_type opt_type = bb_optimization_type (gimple_bb (mul_stmt));
2871 if (!direct_internal_fn_supported_p (IFN_FMA, type, opt_type))
16949072
RG
2872 return false;
2873
0fb808ea
JJ
2874  /* If the multiplication has zero uses, it is probably kept around
2875     because of -fnon-call-exceptions.  Don't optimize it away in that
2876     case; that is DCE's job.  */
2877 if (has_zero_uses (mul_result))
2878 return false;
2879
4a0d0ed2
MJ
2880 bool check_defer
2881 = (state->m_deferring_p
2882 && (tree_to_shwi (TYPE_SIZE (type))
2883 <= PARAM_VALUE (PARAM_AVOID_FMA_MAX_BITS)));
2884 bool defer = check_defer;
16949072
RG
2885 /* Make sure that the multiplication statement becomes dead after
2886 the transformation, thus that all uses are transformed to FMAs.
2887 This means we assume that an FMA operation has the same cost
2888 as an addition. */
2889 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
2890 {
a5f09e73
RH
2891 tree result = mul_result;
2892 bool negate_p = false;
16949072
RG
2893
2894 use_stmt = USE_STMT (use_p);
2895
76b14c29
RG
2896 if (is_gimple_debug (use_stmt))
2897 continue;
2898
16949072
RG
2899      /* For now restrict this operation to single basic blocks.  In theory
2900 we would want to support sinking the multiplication in
2901 m = a*b;
2902 if ()
2903 ma = m + c;
2904 else
2905 d = m;
2906 to form a fma in the then block and sink the multiplication to the
2907 else block. */
2908 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2909 return false;
2910
a5f09e73 2911 /* A negate on the multiplication leads to FNMA. */
0936858f
RS
2912 if (is_gimple_assign (use_stmt)
2913 && gimple_assign_rhs_code (use_stmt) == NEGATE_EXPR)
a5f09e73 2914 {
a758fd67 2915 ssa_op_iter iter;
dae957ae 2916 use_operand_p usep;
a758fd67 2917
a5f09e73
RH
2918 result = gimple_assign_lhs (use_stmt);
2919
2920 /* Make sure the negate statement becomes dead with this
2921 single transformation. */
2922 if (!single_imm_use (gimple_assign_lhs (use_stmt),
2923 &use_p, &neguse_stmt))
2924 return false;
2925
a758fd67 2926 /* Make sure the multiplication isn't also used on that stmt. */
dae957ae
RG
2927 FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
2928 if (USE_FROM_PTR (usep) == mul_result)
a758fd67
RG
2929 return false;
2930
a5f09e73
RH
2931 /* Re-validate. */
2932 use_stmt = neguse_stmt;
2933 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2934 return false;
a5f09e73 2935
a5f09e73
RH
2936 negate_p = true;
2937 }
16949072 2938
0936858f
RS
2939 tree cond, else_value, ops[3];
2940 tree_code code;
2941 if (!can_interpret_as_conditional_op_p (use_stmt, &cond, &code, ops,
2942 &else_value))
2943 return false;
2944
2945 switch (code)
a5f09e73
RH
2946 {
2947 case MINUS_EXPR:
0936858f 2948 if (ops[1] == result)
a1d8aa4b
RH
2949 negate_p = !negate_p;
2950 break;
a5f09e73 2951 case PLUS_EXPR:
a5f09e73 2952 break;
a5f09e73
RH
2953 default:
2954 /* FMA can only be formed from PLUS and MINUS. */
2955 return false;
2956 }
16949072 2957
0936858f
RS
2958 if (cond)
2959 {
2960 if (cond == result || else_value == result)
2961 return false;
2962 if (!direct_internal_fn_supported_p (IFN_COND_FMA, type, opt_type))
2963 return false;
2964 }
2965
2966 /* If the subtrahend (OPS[1]) is computed by a MULT_EXPR that
2967 we'll visit later, we might be able to get a more profitable
2968 match with fnma.
ee8a9b7b
JR
2969 OTOH, if we don't, a negate / fma pair has likely lower latency
2970      than a mult / subtract pair.  */
0936858f
RS
2971 if (code == MINUS_EXPR
2972 && !negate_p
2973 && ops[0] == result
c566cc9f 2974 && !direct_internal_fn_supported_p (IFN_FMS, type, opt_type)
0936858f
RS
2975 && direct_internal_fn_supported_p (IFN_FNMA, type, opt_type)
2976 && TREE_CODE (ops[1]) == SSA_NAME
2977 && has_single_use (ops[1]))
ee8a9b7b 2978 {
0936858f
RS
2979 gimple *stmt2 = SSA_NAME_DEF_STMT (ops[1]);
2980 if (is_gimple_assign (stmt2)
2981 && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
2982 return false;
ee8a9b7b
JR
2983 }
2984
a5f09e73 2985 /* We can't handle a * b + a * b. */
0936858f 2986 if (ops[0] == ops[1])
4a0d0ed2
MJ
2987 return false;
2988 /* If deferring, make sure we are not looking at an instruction that
2989 wouldn't have existed if we were not. */
2990 if (state->m_deferring_p
0936858f
RS
2991 && (state->m_mul_result_set.contains (ops[0])
2992 || state->m_mul_result_set.contains (ops[1])))
a5f09e73 2993 return false;
a1d8aa4b 2994
4a0d0ed2 2995 if (check_defer)
a5f09e73 2996 {
0936858f 2997 tree use_lhs = gimple_get_lhs (use_stmt);
4a0d0ed2
MJ
2998 if (state->m_last_result)
2999 {
0936858f
RS
3000 if (ops[1] == state->m_last_result
3001 || ops[0] == state->m_last_result)
4a0d0ed2
MJ
3002 defer = true;
3003 else
3004 defer = false;
3005 }
3006 else
3007 {
3008 gcc_checking_assert (!state->m_initial_phi);
3009 gphi *phi;
0936858f
RS
3010 if (ops[0] == result)
3011 phi = result_of_phi (ops[1]);
4a0d0ed2
MJ
3012 else
3013 {
0936858f
RS
3014 gcc_assert (ops[1] == result);
3015 phi = result_of_phi (ops[0]);
4a0d0ed2 3016 }
a5f09e73 3017
4a0d0ed2
MJ
3018 if (phi)
3019 {
3020 state->m_initial_phi = phi;
3021 defer = true;
3022 }
3023 else
3024 defer = false;
3025 }
a5f09e73 3026
4a0d0ed2
MJ
3027 state->m_last_result = use_lhs;
3028 check_defer = false;
16949072
RG
3029 }
3030 else
4a0d0ed2
MJ
3031 defer = false;
3032
3033 /* While it is possible to validate whether or not the exact form that
3034 we've recognized is available in the backend, the assumption is that
3035 if the deferring logic above did not trigger, the transformation is
3036 never a loss. For instance, suppose the target only has the plain FMA
3037 pattern available. Consider a*b-c -> fma(a,b,-c): we've exchanged
3038 MUL+SUB for FMA+NEG, which is still two operations. Consider
3039 -(a*b)-c -> fma(-a,b,-c): we still have 3 operations, but in the FMA
3040 form the two NEGs are independent and could be run in parallel. */
3041 }
3042
3043 if (defer)
3044 {
3045 fma_transformation_info fti;
3046 fti.mul_stmt = mul_stmt;
3047 fti.mul_result = mul_result;
3048 fti.op1 = op1;
3049 fti.op2 = op2;
3050 state->m_candidates.safe_push (fti);
3051 state->m_mul_result_set.add (mul_result);
3052
3053 if (dump_file && (dump_flags & TDF_DETAILS))
16949072 3054 {
4a0d0ed2 3055 fprintf (dump_file, "Deferred generating FMA for multiplication ");
4af78ef8 3056 print_gimple_stmt (dump_file, mul_stmt, 0, TDF_NONE);
4a0d0ed2 3057 fprintf (dump_file, "\n");
16949072
RG
3058 }
3059
4a0d0ed2
MJ
3060 return false;
3061 }
3062 else
3063 {
3064 if (state->m_deferring_p)
3065 cancel_fma_deferring (state);
3066 convert_mult_to_fma_1 (mul_result, op1, op2);
3067 return true;
16949072 3068 }
16949072
RG
3069}
3070
6837d899
JJ
3071
3072/* Helper function of match_uaddsub_overflow. Return 1
3073 if USE_STMT is unsigned overflow check ovf != 0 for
3074 STMT, -1 if USE_STMT is unsigned overflow check ovf == 0
3075 and 0 otherwise. */
3076
3077static int
3078uaddsub_overflow_check_p (gimple *stmt, gimple *use_stmt)
3079{
3080 enum tree_code ccode = ERROR_MARK;
3081 tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
3082 if (gimple_code (use_stmt) == GIMPLE_COND)
3083 {
3084 ccode = gimple_cond_code (use_stmt);
3085 crhs1 = gimple_cond_lhs (use_stmt);
3086 crhs2 = gimple_cond_rhs (use_stmt);
3087 }
3088 else if (is_gimple_assign (use_stmt))
3089 {
3090 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3091 {
3092 ccode = gimple_assign_rhs_code (use_stmt);
3093 crhs1 = gimple_assign_rhs1 (use_stmt);
3094 crhs2 = gimple_assign_rhs2 (use_stmt);
3095 }
3096 else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
3097 {
3098 tree cond = gimple_assign_rhs1 (use_stmt);
3099 if (COMPARISON_CLASS_P (cond))
3100 {
3101 ccode = TREE_CODE (cond);
3102 crhs1 = TREE_OPERAND (cond, 0);
3103 crhs2 = TREE_OPERAND (cond, 1);
3104 }
3105 else
3106 return 0;
3107 }
3108 else
3109 return 0;
3110 }
3111 else
3112 return 0;
3113
3114 if (TREE_CODE_CLASS (ccode) != tcc_comparison)
3115 return 0;
3116
3117 enum tree_code code = gimple_assign_rhs_code (stmt);
3118 tree lhs = gimple_assign_lhs (stmt);
3119 tree rhs1 = gimple_assign_rhs1 (stmt);
3120 tree rhs2 = gimple_assign_rhs2 (stmt);
3121
3122 switch (ccode)
3123 {
3124 case GT_EXPR:
3125 case LE_EXPR:
3126 /* r = a - b; r > a or r <= a
3127 r = a + b; a > r or a <= r or b > r or b <= r. */
3128 if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
3129 || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
3130 && crhs2 == lhs))
3131 return ccode == GT_EXPR ? 1 : -1;
3132 break;
3133 case LT_EXPR:
3134 case GE_EXPR:
3135 /* r = a - b; a < r or a >= r
3136 r = a + b; r < a or r >= a or r < b or r >= b. */
3137 if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
3138 || (code == PLUS_EXPR && crhs1 == lhs
3139 && (crhs2 == rhs1 || crhs2 == rhs2)))
3140 return ccode == LT_EXPR ? 1 : -1;
3141 break;
3142 default:
3143 break;
3144 }
3145 return 0;
3146}
3147
3148/* Recognize for unsigned x
3149 x = y - z;
3150 if (x > y)
3151 where there are other uses of x and replace it with
3152 _7 = SUB_OVERFLOW (y, z);
3153 x = REALPART_EXPR <_7>;
3154 _8 = IMAGPART_EXPR <_7>;
3155 if (_8)
3156 and similarly for addition. */
3157
3158static bool
3159match_uaddsub_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
3160 enum tree_code code)
3161{
3162 tree lhs = gimple_assign_lhs (stmt);
3163 tree type = TREE_TYPE (lhs);
3164 use_operand_p use_p;
3165 imm_use_iterator iter;
3166 bool use_seen = false;
3167 bool ovf_use_seen = false;
3168 gimple *use_stmt;
3169
3170 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
3171 if (!INTEGRAL_TYPE_P (type)
3172 || !TYPE_UNSIGNED (type)
3173 || has_zero_uses (lhs)
3174 || has_single_use (lhs)
3175 || optab_handler (code == PLUS_EXPR ? uaddv4_optab : usubv4_optab,
3176 TYPE_MODE (type)) == CODE_FOR_nothing)
3177 return false;
3178
3179 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3180 {
3181 use_stmt = USE_STMT (use_p);
3182 if (is_gimple_debug (use_stmt))
3183 continue;
3184
3185 if (uaddsub_overflow_check_p (stmt, use_stmt))
3186 ovf_use_seen = true;
3187 else
3188 use_seen = true;
3189 if (ovf_use_seen && use_seen)
3190 break;
3191 }
3192
3193 if (!ovf_use_seen || !use_seen)
3194 return false;
3195
3196 tree ctype = build_complex_type (type);
3197 tree rhs1 = gimple_assign_rhs1 (stmt);
3198 tree rhs2 = gimple_assign_rhs2 (stmt);
3199 gcall *g = gimple_build_call_internal (code == PLUS_EXPR
3200 ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW,
3201 2, rhs1, rhs2);
3202 tree ctmp = make_ssa_name (ctype);
3203 gimple_call_set_lhs (g, ctmp);
3204 gsi_insert_before (gsi, g, GSI_SAME_STMT);
3205 gassign *g2 = gimple_build_assign (lhs, REALPART_EXPR,
3206 build1 (REALPART_EXPR, type, ctmp));
3207 gsi_replace (gsi, g2, true);
3208 tree ovf = make_ssa_name (type);
3209 g2 = gimple_build_assign (ovf, IMAGPART_EXPR,
3210 build1 (IMAGPART_EXPR, type, ctmp));
3211 gsi_insert_after (gsi, g2, GSI_NEW_STMT);
3212
3213 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3214 {
3215 if (is_gimple_debug (use_stmt))
3216 continue;
3217
3218 int ovf_use = uaddsub_overflow_check_p (stmt, use_stmt);
3219 if (ovf_use == 0)
3220 continue;
3221 if (gimple_code (use_stmt) == GIMPLE_COND)
3222 {
3223 gcond *cond_stmt = as_a <gcond *> (use_stmt);
3224 gimple_cond_set_lhs (cond_stmt, ovf);
3225 gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0));
3226 gimple_cond_set_code (cond_stmt, ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3227 }
3228 else
3229 {
3230 gcc_checking_assert (is_gimple_assign (use_stmt));
3231 if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
3232 {
3233 gimple_assign_set_rhs1 (use_stmt, ovf);
3234 gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0));
3235 gimple_assign_set_rhs_code (use_stmt,
3236 ovf_use == 1 ? NE_EXPR : EQ_EXPR);
3237 }
3238 else
3239 {
3240 gcc_checking_assert (gimple_assign_rhs_code (use_stmt)
3241 == COND_EXPR);
3242 tree cond = build2 (ovf_use == 1 ? NE_EXPR : EQ_EXPR,
3243 boolean_type_node, ovf,
3244 build_int_cst (type, 0));
3245 gimple_assign_set_rhs1 (use_stmt, cond);
3246 }
3247 }
3248 update_stmt (use_stmt);
3249 }
3250 return true;
3251}
3252
e72531b9
PK
3253/* Return true if target has support for divmod. */
3254
3255static bool
3256target_supports_divmod_p (optab divmod_optab, optab div_optab, machine_mode mode)
3257{
3258 /* If target supports hardware divmod insn, use it for divmod. */
3259 if (optab_handler (divmod_optab, mode) != CODE_FOR_nothing)
3260 return true;
3261
3262 /* Check if libfunc for divmod is available. */
3263 rtx libfunc = optab_libfunc (divmod_optab, mode);
3264 if (libfunc != NULL_RTX)
3265 {
3266 /* If optab_handler exists for div_optab, perhaps in a wider mode,
3267 we don't want to use the libfunc even if it exists for given mode. */
c94843d2
RS
3268 machine_mode div_mode;
3269 FOR_EACH_MODE_FROM (div_mode, mode)
e72531b9
PK
3270 if (optab_handler (div_optab, div_mode) != CODE_FOR_nothing)
3271 return false;
3272
3273 return targetm.expand_divmod_libfunc != NULL;
3274 }
3275
3276 return false;
3277}

/* Check if stmt is a candidate for the divmod transform.  */

static bool
divmod_candidate_p (gassign *stmt)
{
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));
  machine_mode mode = TYPE_MODE (type);
  optab divmod_optab, div_optab;

  if (TYPE_UNSIGNED (type))
    {
      divmod_optab = udivmod_optab;
      div_optab = udiv_optab;
    }
  else
    {
      divmod_optab = sdivmod_optab;
      div_optab = sdiv_optab;
    }

  tree op1 = gimple_assign_rhs1 (stmt);
  tree op2 = gimple_assign_rhs2 (stmt);

  /* Disable the transform if either operand is a constant, since
     division-by-constant may have specialized expansion.  */
  if (CONSTANT_CLASS_P (op1) || CONSTANT_CLASS_P (op2))
    return false;

  /* Exclude the case where TYPE_OVERFLOW_TRAPS (type), as that should
     expand using the [su]divv optabs.  */
  if (TYPE_OVERFLOW_TRAPS (type))
    return false;

  if (!target_supports_divmod_p (divmod_optab, div_optab, mode))
    return false;

  return true;
}

/* This function looks for:
     t1 = a TRUNC_DIV_EXPR b;
     t2 = a TRUNC_MOD_EXPR b;
   and transforms it to the following sequence:
     complex_tmp = DIVMOD (a, b);
     t1 = REALPART_EXPR (complex_tmp);
     t2 = IMAGPART_EXPR (complex_tmp);
   For conditions enabling the transform see divmod_candidate_p ().

   The pass has three parts:
   1) Find top_stmt, a trunc_div or trunc_mod stmt that dominates all
      other trunc_div_expr and trunc_mod_expr stmts.
   2) Add top_stmt and all trunc_div and trunc_mod stmts dominated by
      top_stmt to the stmts vector.
   3) Insert a DIVMOD call just before top_stmt and update entries in the
      stmts vector to use the return value of DIVMOD (REALPART_EXPR for
      div, IMAGPART_EXPR for mod).  */
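
/* For example (an illustrative fragment, not from a testcase):

     q = x / y;
     r = x % y;

   becomes, provided the division statement dominates the modulo:

     divmod_tmp = DIVMOD (x, y);
     q = REALPART_EXPR <divmod_tmp>;
     r = IMAGPART_EXPR <divmod_tmp>;

   computing both results with a single divmod insn or libfunc call.  */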

static bool
convert_to_divmod (gassign *stmt)
{
  if (stmt_can_throw_internal (stmt)
      || !divmod_candidate_p (stmt))
    return false;

  tree op1 = gimple_assign_rhs1 (stmt);
  tree op2 = gimple_assign_rhs2 (stmt);

  imm_use_iterator use_iter;
  gimple *use_stmt;
  auto_vec<gimple *> stmts;

  gimple *top_stmt = stmt;
  basic_block top_bb = gimple_bb (stmt);

  /* Part 1: Try to set top_stmt to the "topmost" stmt that dominates
     at least stmt and possibly other trunc_div/trunc_mod stmts with
     the same operands as stmt.  */
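
  /* E.g. if a trunc_div/trunc_mod with the same operands occurs in a
     block dominating stmt's block, top_stmt moves there, so the DIVMOD
     call inserted before it covers both statements.  */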

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, op1)
    {
      if (is_gimple_assign (use_stmt)
	  && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
	      || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
	  && operand_equal_p (op1, gimple_assign_rhs1 (use_stmt), 0)
	  && operand_equal_p (op2, gimple_assign_rhs2 (use_stmt), 0))
	{
	  if (stmt_can_throw_internal (use_stmt))
	    continue;

	  basic_block bb = gimple_bb (use_stmt);

	  if (bb == top_bb)
	    {
	      if (gimple_uid (use_stmt) < gimple_uid (top_stmt))
		top_stmt = use_stmt;
	    }
	  else if (dominated_by_p (CDI_DOMINATORS, top_bb, bb))
	    {
	      top_bb = bb;
	      top_stmt = use_stmt;
	    }
	}
    }

  tree top_op1 = gimple_assign_rhs1 (top_stmt);
  tree top_op2 = gimple_assign_rhs2 (top_stmt);

  stmts.safe_push (top_stmt);
  bool div_seen = (gimple_assign_rhs_code (top_stmt) == TRUNC_DIV_EXPR);

  /* Part 2: Add all trunc_div/trunc_mod statements dominated by top_bb
     to the stmts vector.  The 2nd loop always adds stmt to the stmts
     vector, since gimple_bb (top_stmt) dominates gimple_bb (stmt), so
     it ends up adding at least one trunc_mod_expr stmt.  */

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, top_op1)
    {
      if (is_gimple_assign (use_stmt)
	  && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
	      || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
	  && operand_equal_p (top_op1, gimple_assign_rhs1 (use_stmt), 0)
	  && operand_equal_p (top_op2, gimple_assign_rhs2 (use_stmt), 0))
	{
	  if (use_stmt == top_stmt
	      || stmt_can_throw_internal (use_stmt)
	      || !dominated_by_p (CDI_DOMINATORS, gimple_bb (use_stmt), top_bb))
	    continue;

	  stmts.safe_push (use_stmt);
	  if (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR)
	    div_seen = true;
	}
    }

  if (!div_seen)
    return false;

  /* Part 3: Create a call to the internal fn DIVMOD:
     divmod_tmp = DIVMOD (op1, op2).  */

  gcall *call_stmt = gimple_build_call_internal (IFN_DIVMOD, 2, op1, op2);
  tree res = make_temp_ssa_name (build_complex_type (TREE_TYPE (op1)),
				 call_stmt, "divmod_tmp");
  gimple_call_set_lhs (call_stmt, res);
  /* We rejected throwing statements above.  */
  gimple_call_set_nothrow (call_stmt, true);

  /* Insert the call before top_stmt.  */
  gimple_stmt_iterator top_stmt_gsi = gsi_for_stmt (top_stmt);
  gsi_insert_before (&top_stmt_gsi, call_stmt, GSI_SAME_STMT);

  widen_mul_stats.divmod_calls_inserted++;

  /* Update all statements in the stmts vector:
     lhs = op1 TRUNC_DIV_EXPR op2 -> lhs = REALPART_EXPR<divmod_tmp>
     lhs = op1 TRUNC_MOD_EXPR op2 -> lhs = IMAGPART_EXPR<divmod_tmp>.  */

  for (unsigned i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree new_rhs;

      switch (gimple_assign_rhs_code (use_stmt))
	{
	case TRUNC_DIV_EXPR:
	  new_rhs = fold_build1 (REALPART_EXPR, TREE_TYPE (op1), res);
	  break;

	case TRUNC_MOD_EXPR:
	  new_rhs = fold_build1 (IMAGPART_EXPR, TREE_TYPE (op1), res);
	  break;

	default:
	  gcc_unreachable ();
	}

      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_from_tree (&gsi, new_rhs);
      update_stmt (use_stmt);
    }

  return true;
}

/* Find integer multiplications where the operands are extended from
   smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
   where appropriate.  */
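
/* For example (an illustrative fragment):

     short a, b;
     int c = (int) a * (int) b;

   can be rewritten to use WIDEN_MULT_EXPR and expand to a single
   widening multiply instruction on targets that provide one.  */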

namespace {

const pass_data pass_data_optimize_widening_mul =
{
  GIMPLE_PASS, /* type */
  "widening_mul", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_WIDEN_MUL, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_optimize_widening_mul : public gimple_opt_pass
{
public:
  pass_optimize_widening_mul (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_widening_mul

/* Walker class to perform the transformation in reverse dominance order.  */

class math_opts_dom_walker : public dom_walker
{
public:
  /* Constructor.  CFG_CHANGED is a pointer to a boolean flag that will be
     set if walking modifies the CFG.  */

  math_opts_dom_walker (bool *cfg_changed_p)
    : dom_walker (CDI_DOMINATORS), m_last_result_set (),
      m_cfg_changed_p (cfg_changed_p) {}

  /* The actual actions performed in the walk.  */

  virtual void after_dom_children (basic_block);

  /* Set of results of chains of multiply and add statement combinations that
     were not transformed into FMAs because of active deferring.  */
  hash_set<tree> m_last_result_set;

  /* Pointer to a flag of the user that needs to be set if the CFG has been
     modified.  */
  bool *m_cfg_changed_p;
};

void
math_opts_dom_walker::after_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  fma_deferring_state fma_state (PARAM_VALUE (PARAM_AVOID_FMA_MAX_BITS) > 0);

  for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
    {
      gimple *stmt = gsi_stmt (gsi);
      enum tree_code code;

      if (is_gimple_assign (stmt))
	{
	  code = gimple_assign_rhs_code (stmt);
	  switch (code)
	    {
	    case MULT_EXPR:
	      if (!convert_mult_to_widen (stmt, &gsi)
		  && !convert_expand_mult_copysign (stmt, &gsi)
		  && convert_mult_to_fma (stmt,
					  gimple_assign_rhs1 (stmt),
					  gimple_assign_rhs2 (stmt),
					  &fma_state))
		{
		  gsi_remove (&gsi, true);
		  release_defs (stmt);
		  continue;
		}
	      break;

	    case PLUS_EXPR:
	    case MINUS_EXPR:
	      if (!convert_plusminus_to_widen (&gsi, stmt, code))
		match_uaddsub_overflow (&gsi, stmt, code);
	      break;

	    case TRUNC_MOD_EXPR:
	      convert_to_divmod (as_a<gassign *> (stmt));
	      break;

	    default:;
	    }
	}
      else if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
	    {
	      switch (DECL_FUNCTION_CODE (fndecl))
		{
		case BUILT_IN_POWF:
		case BUILT_IN_POW:
		case BUILT_IN_POWL:
		  if (gimple_call_lhs (stmt)
		      && TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
		      && real_equal
			   (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
			    &dconst2)
		      && convert_mult_to_fma (stmt,
					      gimple_call_arg (stmt, 0),
					      gimple_call_arg (stmt, 0),
					      &fma_state))
		    {
		      unlink_stmt_vdef (stmt);
		      if (gsi_remove (&gsi, true)
			  && gimple_purge_dead_eh_edges (bb))
			*m_cfg_changed_p = true;
		      release_defs (stmt);
		      continue;
		    }
		  break;

		default:;
		}
	    }
	  else
	    cancel_fma_deferring (&fma_state);
	}
      gsi_next (&gsi);
    }
  if (fma_state.m_deferring_p
      && fma_state.m_initial_phi)
    {
      gcc_checking_assert (fma_state.m_last_result);
      if (!last_fma_candidate_feeds_initial_phi (&fma_state,
						 &m_last_result_set))
	cancel_fma_deferring (&fma_state);
      else
	m_last_result_set.add (fma_state.m_last_result);
    }
}


unsigned int
pass_optimize_widening_mul::execute (function *fun)
{
  bool cfg_changed = false;

  memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  renumber_gimple_stmt_uids ();

  math_opts_dom_walker (&cfg_changed).walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));

  statistics_counter_event (fun, "widening multiplications inserted",
			    widen_mul_stats.widen_mults_inserted);
  statistics_counter_event (fun, "widening maccs inserted",
			    widen_mul_stats.maccs_inserted);
  statistics_counter_event (fun, "fused multiply-adds inserted",
			    widen_mul_stats.fmas_inserted);
  statistics_counter_event (fun, "divmod calls inserted",
			    widen_mul_stats.divmod_calls_inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_optimize_widening_mul (gcc::context *ctxt)
{
  return new pass_optimize_widening_mul (ctxt);
}