1 /* Induction variable optimizations.
2 Copyright (C) 2003-2022 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This pass tries to find the optimal set of induction variables for the loop.
21 It optimizes just the basic linear induction variables (although adding
22 support for other types should not be too hard). It includes the
23 optimizations commonly known as strength reduction, induction variable
24 coalescing and induction variable elimination. It does it in the
25 following steps:
26
27 1) The interesting uses of induction variables are found. This includes
28
29 -- uses of induction variables in non-linear expressions
30 -- addresses of arrays
31 -- comparisons of induction variables
32
33	     Note the interesting uses are categorized and handled in groups.
34	     Generally, address type uses are grouped together if their iv bases
35	     differ only in constant offset.
36
37 2) Candidates for the induction variables are found. This includes
38
39 -- old induction variables
40 -- the variables defined by expressions derived from the "interesting
41 groups/uses" above
42
43	   3) The optimal (w.r.t. a cost function) set of variables is chosen.  The
44 cost function assigns a cost to sets of induction variables and consists
45 of three parts:
46
47 -- The group/use costs. Each of the interesting groups/uses chooses
48 the best induction variable in the set and adds its cost to the sum.
49	        The cost reflects the time spent on modifying the induction variable's
50 value to be usable for the given purpose (adding base and offset for
51 arrays, etc.).
52 -- The variable costs. Each of the variables has a cost assigned that
53 reflects the costs associated with incrementing the value of the
54 variable. The original variables are somewhat preferred.
55 -- The set cost. Depending on the size of the set, extra cost may be
56 added to reflect register pressure.
57
58 All the costs are defined in a machine-specific way, using the target
59 hooks and machine descriptions to determine them.
60
61 4) The trees are transformed to use the new variables, the dead code is
62 removed.
63
64	   All of this is done loop by loop.  Doing it globally is theoretically
65	   possible; it might give better performance and enable us to decide
66	   costs more precisely, but getting all the interactions right would be
67	   complicated.
68
69	   For targets supporting low-overhead loops, IVOPTs has to take care of
70	   loops which will probably be transformed by the RTL doloop optimization,
71	   and try to make the selected IV candidate set optimal for them.  The
72	   process of doloop support includes:
73
74	   1) Analyze whether the current loop will be transformed into a doloop; if so,
75	      find and mark its compare type IV use as a doloop use (iv_group field
76	      doloop_p), and set flag doloop_use_p of ivopts_data to notify subsequent
77	      processing on the doloop.  See analyze_and_mark_doloop_use and its callees
78	      for the details.  The target hook predict_doloop_p can be used for target
	      specific checks.
79
80	   2) Add one doloop dedicated IV cand {(may_be_zero ? 1 : (niter + 1)), +, -1},
81	      set flag doloop_p of the iv_cand, and set its step cost to zero with no
82	      extra cost, like a biv.  For cost determination between the doloop IV cand
83	      and an IV use, the target hooks doloop_cost_for_generic and
84	      doloop_cost_for_address are provided to add extra costs for generic type
85	      and address type IV uses.  Zero cost is assigned to the pair of doloop IV
86	      cand and doloop IV use, and bound zero is set for IV elimination.
87
88	   3) With the cost settings from step 2), the existing cost model based IV
89	      selection algorithm proceeds as usual and picks up the doloop dedicated IV
90	      if profitable.  */
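
/* As a rough, purely illustrative sketch of the kind of transformation this
   pass enables (the exact outcome depends on the target cost model), a loop
   such as

	for (i = 0; i < n; i++)
	  a[i] = 0;

   can have the address computation &a[i] strength-reduced to a pointer-like
   IV and the exit test rewritten against that IV's final value, conceptually

	for (p = &a[0]; p != &a[n]; p++)
	  *p = 0;

   so that the original counter I may be eliminated if it has no other
   uses.  */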
91
92 #include "config.h"
93 #include "system.h"
94 #include "coretypes.h"
95 #include "backend.h"
96 #include "rtl.h"
97 #include "tree.h"
98 #include "gimple.h"
99 #include "cfghooks.h"
100 #include "tree-pass.h"
101 #include "memmodel.h"
102 #include "tm_p.h"
103 #include "ssa.h"
104 #include "expmed.h"
105 #include "insn-config.h"
106 #include "emit-rtl.h"
107 #include "recog.h"
108 #include "cgraph.h"
109 #include "gimple-pretty-print.h"
110 #include "alias.h"
111 #include "fold-const.h"
112 #include "stor-layout.h"
113 #include "tree-eh.h"
114 #include "gimplify.h"
115 #include "gimple-iterator.h"
116 #include "gimplify-me.h"
117 #include "tree-cfg.h"
118 #include "tree-ssa-loop-ivopts.h"
119 #include "tree-ssa-loop-manip.h"
120 #include "tree-ssa-loop-niter.h"
121 #include "tree-ssa-loop.h"
122 #include "explow.h"
123 #include "expr.h"
124 #include "tree-dfa.h"
125 #include "tree-ssa.h"
126 #include "cfgloop.h"
127 #include "tree-scalar-evolution.h"
128 #include "tree-affine.h"
129 #include "tree-ssa-propagate.h"
130 #include "tree-ssa-address.h"
131 #include "builtins.h"
132 #include "tree-vectorizer.h"
133 #include "dbgcnt.h"
134
135 /* For lang_hooks.types.type_for_mode. */
136 #include "langhooks.h"
137
138 /* FIXME: Expressions are expanded to RTL in this pass to determine the
139 cost of different addressing modes. This should be moved to a TBD
140 interface between the GIMPLE and RTL worlds. */
141
142 /* The infinite cost. */
143 #define INFTY 1000000000
144
145 /* Returns the expected number of loop iterations for LOOP.
146 The average trip count is computed from profile data if it
147 exists. */
148
149 static inline HOST_WIDE_INT
150 avg_loop_niter (class loop *loop)
151 {
152 HOST_WIDE_INT niter = estimated_stmt_executions_int (loop);
153 if (niter == -1)
154 {
155 niter = likely_max_stmt_executions_int (loop);
156
157 if (niter == -1 || niter > param_avg_loop_niter)
158 return param_avg_loop_niter;
159 }
160
161 return niter;
162 }
163
164 struct iv_use;
165
166 /* Representation of the induction variable. */
167 struct iv
168 {
169 tree base; /* Initial value of the iv. */
170	  tree base_object;	/* A memory object to which the induction variable points.  */
171 tree step; /* Step of the iv (constant only). */
172 tree ssa_name; /* The ssa name with the value. */
173	  struct iv_use *nonlin_use;	/* The nonlinear use of this iv, if any.  */
174 bool biv_p; /* Is it a biv? */
175 bool no_overflow; /* True if the iv doesn't overflow. */
176 bool have_address_use;/* For biv, indicate if it's used in any address
177 type use. */
178 };
179
180 /* Per-ssa version information (induction variable descriptions, etc.). */
181 struct version_info
182 {
183 tree name; /* The ssa name. */
184 struct iv *iv; /* Induction variable description. */
185 bool has_nonlin_use; /* For a loop-level invariant, whether it is used in
186 an expression that is not an induction variable. */
187 bool preserve_biv; /* For the original biv, whether to preserve it. */
188 unsigned inv_id; /* Id of an invariant. */
189 };
190
191 /* Types of uses. */
192 enum use_type
193 {
194 USE_NONLINEAR_EXPR, /* Use in a nonlinear expression. */
195 USE_REF_ADDRESS, /* Use is an address for an explicit memory
196 reference. */
197 USE_PTR_ADDRESS, /* Use is a pointer argument to a function in
198 cases where the expansion of the function
199 will turn the argument into a normal address. */
200 USE_COMPARE /* Use is a compare. */
201 };
202
203 /* Cost of a computation. */
204 class comp_cost
205 {
206 public:
207 comp_cost (): cost (0), complexity (0), scratch (0)
208 {}
209
210 comp_cost (int64_t cost, unsigned complexity, int64_t scratch = 0)
211 : cost (cost), complexity (complexity), scratch (scratch)
212 {}
213
214 /* Returns true if COST is infinite. */
215 bool infinite_cost_p ();
216
217 /* Adds costs COST1 and COST2. */
218 friend comp_cost operator+ (comp_cost cost1, comp_cost cost2);
219
220 /* Adds COST to the comp_cost. */
221 comp_cost operator+= (comp_cost cost);
222
223 /* Adds constant C to this comp_cost. */
224 comp_cost operator+= (HOST_WIDE_INT c);
225
226	  /* Subtracts constant C from this comp_cost.  */
227 comp_cost operator-= (HOST_WIDE_INT c);
228
229 /* Divide the comp_cost by constant C. */
230 comp_cost operator/= (HOST_WIDE_INT c);
231
232 /* Multiply the comp_cost by constant C. */
233 comp_cost operator*= (HOST_WIDE_INT c);
234
235 /* Subtracts costs COST1 and COST2. */
236 friend comp_cost operator- (comp_cost cost1, comp_cost cost2);
237
238 /* Subtracts COST from this comp_cost. */
239 comp_cost operator-= (comp_cost cost);
240
241 /* Returns true if COST1 is smaller than COST2. */
242 friend bool operator< (comp_cost cost1, comp_cost cost2);
243
244 /* Returns true if COST1 and COST2 are equal. */
245 friend bool operator== (comp_cost cost1, comp_cost cost2);
246
247	  /* Returns true if COST1 is smaller than or equal to COST2.  */
248 friend bool operator<= (comp_cost cost1, comp_cost cost2);
249
250 int64_t cost; /* The runtime cost. */
251 unsigned complexity; /* The estimate of the complexity of the code for
252 the computation (in no concrete units --
253 complexity field should be larger for more
254 complex expressions and addressing modes). */
255 int64_t scratch; /* Scratch used during cost computation. */
256 };
257
258 static const comp_cost no_cost;
259 static const comp_cost infinite_cost (INFTY, 0, INFTY);
260
261 bool
262 comp_cost::infinite_cost_p ()
263 {
264 return cost == INFTY;
265 }
266
267 comp_cost
268 operator+ (comp_cost cost1, comp_cost cost2)
269 {
270 if (cost1.infinite_cost_p () || cost2.infinite_cost_p ())
271 return infinite_cost;
272
273 gcc_assert (cost1.cost + cost2.cost < infinite_cost.cost);
274 cost1.cost += cost2.cost;
275 cost1.complexity += cost2.complexity;
276
277 return cost1;
278 }
279
280 comp_cost
281 operator- (comp_cost cost1, comp_cost cost2)
282 {
283 if (cost1.infinite_cost_p ())
284 return infinite_cost;
285
286 gcc_assert (!cost2.infinite_cost_p ());
287 gcc_assert (cost1.cost - cost2.cost < infinite_cost.cost);
288
289 cost1.cost -= cost2.cost;
290 cost1.complexity -= cost2.complexity;
291
292 return cost1;
293 }
294
295 comp_cost
296 comp_cost::operator+= (comp_cost cost)
297 {
298 *this = *this + cost;
299 return *this;
300 }
301
302 comp_cost
303 comp_cost::operator+= (HOST_WIDE_INT c)
304 {
305 if (c >= INFTY)
306 this->cost = INFTY;
307
308 if (infinite_cost_p ())
309 return *this;
310
311 gcc_assert (this->cost + c < infinite_cost.cost);
312 this->cost += c;
313
314 return *this;
315 }
316
317 comp_cost
318 comp_cost::operator-= (HOST_WIDE_INT c)
319 {
320 if (infinite_cost_p ())
321 return *this;
322
323 gcc_assert (this->cost - c < infinite_cost.cost);
324 this->cost -= c;
325
326 return *this;
327 }
328
329 comp_cost
330 comp_cost::operator/= (HOST_WIDE_INT c)
331 {
332 gcc_assert (c != 0);
333 if (infinite_cost_p ())
334 return *this;
335
336 this->cost /= c;
337
338 return *this;
339 }
340
341 comp_cost
342 comp_cost::operator*= (HOST_WIDE_INT c)
343 {
344 if (infinite_cost_p ())
345 return *this;
346
347 gcc_assert (this->cost * c < infinite_cost.cost);
348 this->cost *= c;
349
350 return *this;
351 }
352
353 comp_cost
354 comp_cost::operator-= (comp_cost cost)
355 {
356 *this = *this - cost;
357 return *this;
358 }
359
360 bool
361 operator< (comp_cost cost1, comp_cost cost2)
362 {
363 if (cost1.cost == cost2.cost)
364 return cost1.complexity < cost2.complexity;
365
366 return cost1.cost < cost2.cost;
367 }
368
369 bool
370 operator== (comp_cost cost1, comp_cost cost2)
371 {
372 return cost1.cost == cost2.cost
373 && cost1.complexity == cost2.complexity;
374 }
375
376 bool
377 operator<= (comp_cost cost1, comp_cost cost2)
378 {
379 return cost1 < cost2 || cost1 == cost2;
380 }
381
382 struct iv_inv_expr_ent;
383
384 /* The candidate - cost pair. */
385 class cost_pair
386 {
387 public:
388 struct iv_cand *cand; /* The candidate. */
389 comp_cost cost; /* The cost. */
390 enum tree_code comp; /* For iv elimination, the comparison. */
391 bitmap inv_vars; /* The list of invariant ssa_vars that have to be
392 preserved when representing iv_use with iv_cand. */
393 bitmap inv_exprs; /* The list of newly created invariant expressions
394 when representing iv_use with iv_cand. */
395 tree value; /* For final value elimination, the expression for
396 the final value of the iv. For iv elimination,
397 the new bound to compare with. */
398 };
399
400 /* Use. */
401 struct iv_use
402 {
403 unsigned id; /* The id of the use. */
404 unsigned group_id; /* The group id the use belongs to. */
405 enum use_type type; /* Type of the use. */
406 tree mem_type; /* The memory type to use when testing whether an
407 address is legitimate, and what the address's
408 cost is. */
409 struct iv *iv; /* The induction variable it is based on. */
410 gimple *stmt; /* Statement in that it occurs. */
411 tree *op_p; /* The place where it occurs. */
412
413 tree addr_base; /* Base address with const offset stripped. */
414 poly_uint64_pod addr_offset;
415 /* Const offset stripped from base address. */
416 };
417
418 /* Group of uses. */
419 struct iv_group
420 {
421 /* The id of the group. */
422 unsigned id;
423 /* Uses of the group are of the same type. */
424 enum use_type type;
425 /* The set of "related" IV candidates, plus the important ones. */
426 bitmap related_cands;
427 /* Number of IV candidates in the cost_map. */
428 unsigned n_map_members;
429	  /* The costs w.r.t. the iv candidates.  */
430 class cost_pair *cost_map;
431 /* The selected candidate for the group. */
432 struct iv_cand *selected;
433 /* To indicate this is a doloop use group. */
434 bool doloop_p;
435 /* Uses in the group. */
436 vec<struct iv_use *> vuses;
437 };
438
439 /* The position where the iv is computed. */
440 enum iv_position
441 {
442 IP_NORMAL, /* At the end, just before the exit condition. */
443 IP_END, /* At the end of the latch block. */
444 IP_BEFORE_USE, /* Immediately before a specific use. */
445 IP_AFTER_USE, /* Immediately after a specific use. */
446 IP_ORIGINAL /* The original biv. */
447 };
448
449 /* The induction variable candidate. */
450 struct iv_cand
451 {
452 unsigned id; /* The number of the candidate. */
453 bool important; /* Whether this is an "important" candidate, i.e. such
454 that it should be considered by all uses. */
455 bool involves_undefs; /* Whether the IV involves undefined values. */
456 ENUM_BITFIELD(iv_position) pos : 8; /* Where it is computed. */
457 gimple *incremented_at;/* For original biv, the statement where it is
458 incremented. */
459 tree var_before; /* The variable used for it before increment. */
460 tree var_after; /* The variable used for it after increment. */
461 struct iv *iv; /* The value of the candidate. NULL for
462 "pseudocandidate" used to indicate the possibility
463 to replace the final value of an iv by direct
464 computation of the value. */
465 unsigned cost; /* Cost of the candidate. */
466 unsigned cost_step; /* Cost of the candidate's increment operation. */
467 struct iv_use *ainc_use; /* For IP_{BEFORE,AFTER}_USE candidates, the place
468 where it is incremented. */
469 bitmap inv_vars; /* The list of invariant ssa_vars used in step of the
470 iv_cand. */
471 bitmap inv_exprs; /* If step is more complicated than a single ssa_var,
472 handle it as a new invariant expression which will
473 be hoisted out of loop. */
474 struct iv *orig_iv; /* The original iv if this cand is added from biv with
475 smaller type. */
476 bool doloop_p; /* Whether this is a doloop candidate. */
477 };
478
479 /* Hashtable entry for common candidate derived from iv uses. */
480 class iv_common_cand
481 {
482 public:
483 tree base;
484 tree step;
485 /* IV uses from which this common candidate is derived. */
486 auto_vec<struct iv_use *> uses;
487 hashval_t hash;
488 };
489
490 /* Hashtable helpers. */
491
492 struct iv_common_cand_hasher : delete_ptr_hash <iv_common_cand>
493 {
494 static inline hashval_t hash (const iv_common_cand *);
495 static inline bool equal (const iv_common_cand *, const iv_common_cand *);
496 };
497
498 /* Hash function for possible common candidates. */
499
500 inline hashval_t
501 iv_common_cand_hasher::hash (const iv_common_cand *ccand)
502 {
503 return ccand->hash;
504 }
505
506 /* Hash table equality function for common candidates. */
507
508 inline bool
509 iv_common_cand_hasher::equal (const iv_common_cand *ccand1,
510 const iv_common_cand *ccand2)
511 {
512 return (ccand1->hash == ccand2->hash
513 && operand_equal_p (ccand1->base, ccand2->base, 0)
514 && operand_equal_p (ccand1->step, ccand2->step, 0)
515 && (TYPE_PRECISION (TREE_TYPE (ccand1->base))
516 == TYPE_PRECISION (TREE_TYPE (ccand2->base))));
517 }
518
519 /* Loop invariant expression hashtable entry. */
520
521 struct iv_inv_expr_ent
522 {
523 /* Tree expression of the entry. */
524 tree expr;
525	  /* Unique identifier.  */
526 int id;
527 /* Hash value. */
528 hashval_t hash;
529 };
530
531 /* Sort iv_inv_expr_ent pair A and B by id field. */
532
533 static int
534 sort_iv_inv_expr_ent (const void *a, const void *b)
535 {
536 const iv_inv_expr_ent * const *e1 = (const iv_inv_expr_ent * const *) (a);
537 const iv_inv_expr_ent * const *e2 = (const iv_inv_expr_ent * const *) (b);
538
539 unsigned id1 = (*e1)->id;
540 unsigned id2 = (*e2)->id;
541
542 if (id1 < id2)
543 return -1;
544 else if (id1 > id2)
545 return 1;
546 else
547 return 0;
548 }
549
550 /* Hashtable helpers. */
551
552 struct iv_inv_expr_hasher : free_ptr_hash <iv_inv_expr_ent>
553 {
554 static inline hashval_t hash (const iv_inv_expr_ent *);
555 static inline bool equal (const iv_inv_expr_ent *, const iv_inv_expr_ent *);
556 };
557
558 /* Return true if uses of type TYPE represent some form of address. */
559
560 inline bool
561 address_p (use_type type)
562 {
563 return type == USE_REF_ADDRESS || type == USE_PTR_ADDRESS;
564 }
565
566 /* Hash function for loop invariant expressions. */
567
568 inline hashval_t
569 iv_inv_expr_hasher::hash (const iv_inv_expr_ent *expr)
570 {
571 return expr->hash;
572 }
573
574 /* Hash table equality function for expressions. */
575
576 inline bool
577 iv_inv_expr_hasher::equal (const iv_inv_expr_ent *expr1,
578 const iv_inv_expr_ent *expr2)
579 {
580 return expr1->hash == expr2->hash
581 && operand_equal_p (expr1->expr, expr2->expr, 0);
582 }
583
584 struct ivopts_data
585 {
586 /* The currently optimized loop. */
587 class loop *current_loop;
588 location_t loop_loc;
589
590 /* Numbers of iterations for all exits of the current loop. */
591 hash_map<edge, tree_niter_desc *> *niters;
592
593 /* Number of registers used in it. */
594 unsigned regs_used;
595
596 /* The size of version_info array allocated. */
597 unsigned version_info_size;
598
599 /* The array of information for the ssa names. */
600 struct version_info *version_info;
601
602 /* The hashtable of loop invariant expressions created
603 by ivopt. */
604 hash_table<iv_inv_expr_hasher> *inv_expr_tab;
605
606 /* The bitmap of indices in version_info whose value was changed. */
607 bitmap relevant;
608
609 /* The uses of induction variables. */
610 vec<iv_group *> vgroups;
611
612 /* The candidates. */
613 vec<iv_cand *> vcands;
614
615 /* A bitmap of important candidates. */
616 bitmap important_candidates;
617
618 /* Cache used by tree_to_aff_combination_expand. */
619 hash_map<tree, name_expansion *> *name_expansion_cache;
620
621 /* The hashtable of common candidates derived from iv uses. */
622 hash_table<iv_common_cand_hasher> *iv_common_cand_tab;
623
624 /* The common candidates. */
625 vec<iv_common_cand *> iv_common_cands;
626
627 /* Hash map recording base object information of tree exp. */
628 hash_map<tree, tree> *base_object_map;
629
630 /* The maximum invariant variable id. */
631 unsigned max_inv_var_id;
632
633 /* The maximum invariant expression id. */
634 unsigned max_inv_expr_id;
635
636 /* Number of no_overflow BIVs which are not used in memory address. */
637 unsigned bivs_not_used_in_addr;
638
639 /* Obstack for iv structure. */
640 struct obstack iv_obstack;
641
642 /* Whether to consider just related and important candidates when replacing a
643 use. */
644 bool consider_all_candidates;
645
646 /* Are we optimizing for speed? */
647 bool speed;
648
649 /* Whether the loop body includes any function calls. */
650 bool body_includes_call;
651
652 /* Whether the loop body can only be exited via single exit. */
653 bool loop_single_exit_p;
654
655 /* Whether the loop has doloop comparison use. */
656 bool doloop_use_p;
657 };
658
659 /* An assignment of iv candidates to uses. */
660
661 class iv_ca
662 {
663 public:
664 /* The number of uses covered by the assignment. */
665 unsigned upto;
666
667 /* Number of uses that cannot be expressed by the candidates in the set. */
668 unsigned bad_groups;
669
670 /* Candidate assigned to a use, together with the related costs. */
671 class cost_pair **cand_for_group;
672
673 /* Number of times each candidate is used. */
674 unsigned *n_cand_uses;
675
676 /* The candidates used. */
677 bitmap cands;
678
679 /* The number of candidates in the set. */
680 unsigned n_cands;
681
682	  /* The number of invariants needed, including both invariant variables and
683	     invariant expressions.  */
684 unsigned n_invs;
685
686 /* Total cost of expressing uses. */
687 comp_cost cand_use_cost;
688
689 /* Total cost of candidates. */
690 int64_t cand_cost;
691
692 /* Number of times each invariant variable is used. */
693 unsigned *n_inv_var_uses;
694
695 /* Number of times each invariant expression is used. */
696 unsigned *n_inv_expr_uses;
697
698 /* Total cost of the assignment. */
699 comp_cost cost;
700 };
701
702 /* Difference of two iv candidate assignments. */
703
704 struct iv_ca_delta
705 {
706 /* Changed group. */
707 struct iv_group *group;
708
709 /* An old assignment (for rollback purposes). */
710 class cost_pair *old_cp;
711
712 /* A new assignment. */
713 class cost_pair *new_cp;
714
715 /* Next change in the list. */
716 struct iv_ca_delta *next;
717 };
718
719	/* Bound on the number of candidates below which all candidates are considered.  */
720
721 #define CONSIDER_ALL_CANDIDATES_BOUND \
722 ((unsigned) param_iv_consider_all_candidates_bound)
723
724 /* If there are more iv occurrences, we just give up (it is quite unlikely that
725 optimizing such a loop would help, and it would take ages). */
726
727 #define MAX_CONSIDERED_GROUPS \
728 ((unsigned) param_iv_max_considered_uses)
729
730	/* If there are at most this number of ivs in the set, always try removing
731	   unnecessary ivs from the set.  */
732
733 #define ALWAYS_PRUNE_CAND_SET_BOUND \
734 ((unsigned) param_iv_always_prune_cand_set_bound)
735
736	/* The list of trees for which the decl_rtl field must be reset is stored
737 here. */
738
739 static vec<tree> decl_rtl_to_reset;
740
741 static comp_cost force_expr_to_var_cost (tree, bool);
742
743 /* The single loop exit if it dominates the latch, NULL otherwise. */
744
745 edge
746 single_dom_exit (class loop *loop)
747 {
748 edge exit = single_exit (loop);
749
750 if (!exit)
751 return NULL;
752
753 if (!just_once_each_iteration_p (loop, exit->src))
754 return NULL;
755
756 return exit;
757 }
758
759 /* Dumps information about the induction variable IV to FILE. Don't dump
760 variable's name if DUMP_NAME is FALSE. The information is dumped with
761 preceding spaces indicated by INDENT_LEVEL. */
762
763 void
764 dump_iv (FILE *file, struct iv *iv, bool dump_name, unsigned indent_level)
765 {
766 const char *p;
767 const char spaces[9] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '\0'};
768
769 if (indent_level > 4)
770 indent_level = 4;
771 p = spaces + 8 - (indent_level << 1);
772
773 fprintf (file, "%sIV struct:\n", p);
774 if (iv->ssa_name && dump_name)
775 {
776 fprintf (file, "%s SSA_NAME:\t", p);
777 print_generic_expr (file, iv->ssa_name, TDF_SLIM);
778 fprintf (file, "\n");
779 }
780
781 fprintf (file, "%s Type:\t", p);
782 print_generic_expr (file, TREE_TYPE (iv->base), TDF_SLIM);
783 fprintf (file, "\n");
784
785 fprintf (file, "%s Base:\t", p);
786 print_generic_expr (file, iv->base, TDF_SLIM);
787 fprintf (file, "\n");
788
789 fprintf (file, "%s Step:\t", p);
790 print_generic_expr (file, iv->step, TDF_SLIM);
791 fprintf (file, "\n");
792
793 if (iv->base_object)
794 {
795 fprintf (file, "%s Object:\t", p);
796 print_generic_expr (file, iv->base_object, TDF_SLIM);
797 fprintf (file, "\n");
798 }
799
800 fprintf (file, "%s Biv:\t%c\n", p, iv->biv_p ? 'Y' : 'N');
801
802 fprintf (file, "%s Overflowness wrto loop niter:\t%s\n",
803 p, iv->no_overflow ? "No-overflow" : "Overflow");
804 }
805
806 /* Dumps information about the USE to FILE. */
807
808 void
809 dump_use (FILE *file, struct iv_use *use)
810 {
811 fprintf (file, " Use %d.%d:\n", use->group_id, use->id);
812 fprintf (file, " At stmt:\t");
813 print_gimple_stmt (file, use->stmt, 0);
814 fprintf (file, " At pos:\t");
815 if (use->op_p)
816 print_generic_expr (file, *use->op_p, TDF_SLIM);
817 fprintf (file, "\n");
818 dump_iv (file, use->iv, false, 2);
819 }
820
821	/* Dumps information about the groups of uses to FILE.  */
822
823 void
824 dump_groups (FILE *file, struct ivopts_data *data)
825 {
826 unsigned i, j;
827 struct iv_group *group;
828
829 for (i = 0; i < data->vgroups.length (); i++)
830 {
831 group = data->vgroups[i];
832 fprintf (file, "Group %d:\n", group->id);
833 if (group->type == USE_NONLINEAR_EXPR)
834 fprintf (file, " Type:\tGENERIC\n");
835 else if (group->type == USE_REF_ADDRESS)
836 fprintf (file, " Type:\tREFERENCE ADDRESS\n");
837 else if (group->type == USE_PTR_ADDRESS)
838 fprintf (file, " Type:\tPOINTER ARGUMENT ADDRESS\n");
839 else
840 {
841 gcc_assert (group->type == USE_COMPARE);
842 fprintf (file, " Type:\tCOMPARE\n");
843 }
844 for (j = 0; j < group->vuses.length (); j++)
845 dump_use (file, group->vuses[j]);
846 }
847 }
848
849 /* Dumps information about induction variable candidate CAND to FILE. */
850
851 void
852 dump_cand (FILE *file, struct iv_cand *cand)
853 {
854 struct iv *iv = cand->iv;
855
856 fprintf (file, "Candidate %d:\n", cand->id);
857 if (cand->inv_vars)
858 {
859 fprintf (file, " Depend on inv.vars: ");
860 dump_bitmap (file, cand->inv_vars);
861 }
862 if (cand->inv_exprs)
863 {
864 fprintf (file, " Depend on inv.exprs: ");
865 dump_bitmap (file, cand->inv_exprs);
866 }
867
868 if (cand->var_before)
869 {
870 fprintf (file, " Var befor: ");
871 print_generic_expr (file, cand->var_before, TDF_SLIM);
872 fprintf (file, "\n");
873 }
874 if (cand->var_after)
875 {
876 fprintf (file, " Var after: ");
877 print_generic_expr (file, cand->var_after, TDF_SLIM);
878 fprintf (file, "\n");
879 }
880
881 switch (cand->pos)
882 {
883 case IP_NORMAL:
884 fprintf (file, " Incr POS: before exit test\n");
885 break;
886
887 case IP_BEFORE_USE:
888 fprintf (file, " Incr POS: before use %d\n", cand->ainc_use->id);
889 break;
890
891 case IP_AFTER_USE:
892 fprintf (file, " Incr POS: after use %d\n", cand->ainc_use->id);
893 break;
894
895 case IP_END:
896 fprintf (file, " Incr POS: at end\n");
897 break;
898
899 case IP_ORIGINAL:
900 fprintf (file, " Incr POS: orig biv\n");
901 break;
902 }
903
904 dump_iv (file, iv, false, 1);
905 }
906
907 /* Returns the info for ssa version VER. */
908
909 static inline struct version_info *
910 ver_info (struct ivopts_data *data, unsigned ver)
911 {
912 return data->version_info + ver;
913 }
914
915 /* Returns the info for ssa name NAME. */
916
917 static inline struct version_info *
918 name_info (struct ivopts_data *data, tree name)
919 {
920 return ver_info (data, SSA_NAME_VERSION (name));
921 }
922
923 /* Returns true if STMT is after the place where the IP_NORMAL ivs will be
924 emitted in LOOP. */
925
926 static bool
927 stmt_after_ip_normal_pos (class loop *loop, gimple *stmt)
928 {
929 basic_block bb = ip_normal_pos (loop), sbb = gimple_bb (stmt);
930
931 gcc_assert (bb);
932
933 if (sbb == loop->latch)
934 return true;
935
936 if (sbb != bb)
937 return false;
938
939 return stmt == last_stmt (bb);
940 }
941
942	/* Returns true if STMT is after the place where the original induction
943 variable CAND is incremented. If TRUE_IF_EQUAL is set, we return true
944 if the positions are identical. */
945
946 static bool
947 stmt_after_inc_pos (struct iv_cand *cand, gimple *stmt, bool true_if_equal)
948 {
949 basic_block cand_bb = gimple_bb (cand->incremented_at);
950 basic_block stmt_bb = gimple_bb (stmt);
951
952 if (!dominated_by_p (CDI_DOMINATORS, stmt_bb, cand_bb))
953 return false;
954
955 if (stmt_bb != cand_bb)
956 return true;
957
958 if (true_if_equal
959 && gimple_uid (stmt) == gimple_uid (cand->incremented_at))
960 return true;
961 return gimple_uid (stmt) > gimple_uid (cand->incremented_at);
962 }
963
964	/* Returns true if STMT is after the place where the induction variable
965 CAND is incremented in LOOP. */
966
967 static bool
968 stmt_after_increment (class loop *loop, struct iv_cand *cand, gimple *stmt)
969 {
970 switch (cand->pos)
971 {
972 case IP_END:
973 return false;
974
975 case IP_NORMAL:
976 return stmt_after_ip_normal_pos (loop, stmt);
977
978 case IP_ORIGINAL:
979 case IP_AFTER_USE:
980 return stmt_after_inc_pos (cand, stmt, false);
981
982 case IP_BEFORE_USE:
983 return stmt_after_inc_pos (cand, stmt, true);
984
985 default:
986 gcc_unreachable ();
987 }
988 }
989
990 /* walk_tree callback for contains_abnormal_ssa_name_p. */
991
992 static tree
993 contains_abnormal_ssa_name_p_1 (tree *tp, int *walk_subtrees, void *)
994 {
995 if (TREE_CODE (*tp) == SSA_NAME
996 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (*tp))
997 return *tp;
998
999 if (!EXPR_P (*tp))
1000 *walk_subtrees = 0;
1001
1002 return NULL_TREE;
1003 }
1004
1005 /* Returns true if EXPR contains a ssa name that occurs in an
1006 abnormal phi node. */
1007
1008 bool
1009 contains_abnormal_ssa_name_p (tree expr)
1010 {
1011 return walk_tree_without_duplicates
1012 (&expr, contains_abnormal_ssa_name_p_1, NULL) != NULL_TREE;
1013 }
1014
1015 /* Returns the structure describing number of iterations determined from
1016 EXIT of DATA->current_loop, or NULL if something goes wrong. */
1017
1018 static class tree_niter_desc *
1019 niter_for_exit (struct ivopts_data *data, edge exit)
1020 {
1021 class tree_niter_desc *desc;
1022 tree_niter_desc **slot;
1023
1024 if (!data->niters)
1025 {
1026 data->niters = new hash_map<edge, tree_niter_desc *>;
1027 slot = NULL;
1028 }
1029 else
1030 slot = data->niters->get (exit);
1031
1032 if (!slot)
1033 {
1034 /* Try to determine number of iterations. We cannot safely work with ssa
1035 names that appear in phi nodes on abnormal edges, so that we do not
1036 create overlapping life ranges for them (PR 27283). */
1037 desc = XNEW (class tree_niter_desc);
1038 if (!number_of_iterations_exit (data->current_loop,
1039 exit, desc, true)
1040 || contains_abnormal_ssa_name_p (desc->niter))
1041 {
1042 XDELETE (desc);
1043 desc = NULL;
1044 }
1045 data->niters->put (exit, desc);
1046 }
1047 else
1048 desc = *slot;
1049
1050 return desc;
1051 }
1052
1053 /* Returns the structure describing number of iterations determined from
1054 single dominating exit of DATA->current_loop, or NULL if something
1055 goes wrong. */
1056
1057 static class tree_niter_desc *
1058 niter_for_single_dom_exit (struct ivopts_data *data)
1059 {
1060 edge exit = single_dom_exit (data->current_loop);
1061
1062 if (!exit)
1063 return NULL;
1064
1065 return niter_for_exit (data, exit);
1066 }
1067
1068 /* Initializes data structures used by the iv optimization pass, stored
1069 in DATA. */
1070
1071 static void
1072 tree_ssa_iv_optimize_init (struct ivopts_data *data)
1073 {
1074 data->version_info_size = 2 * num_ssa_names;
1075 data->version_info = XCNEWVEC (struct version_info, data->version_info_size);
1076 data->relevant = BITMAP_ALLOC (NULL);
1077 data->important_candidates = BITMAP_ALLOC (NULL);
1078 data->max_inv_var_id = 0;
1079 data->max_inv_expr_id = 0;
1080 data->niters = NULL;
1081 data->vgroups.create (20);
1082 data->vcands.create (20);
1083 data->inv_expr_tab = new hash_table<iv_inv_expr_hasher> (10);
1084 data->name_expansion_cache = NULL;
1085 data->base_object_map = NULL;
1086 data->iv_common_cand_tab = new hash_table<iv_common_cand_hasher> (10);
1087 data->iv_common_cands.create (20);
1088 decl_rtl_to_reset.create (20);
1089 gcc_obstack_init (&data->iv_obstack);
1090 }
1091
1092 /* walk_tree callback for determine_base_object. */
1093
1094 static tree
1095 determine_base_object_1 (tree *tp, int *walk_subtrees, void *wdata)
1096 {
1097 tree_code code = TREE_CODE (*tp);
1098 tree obj = NULL_TREE;
1099 if (code == ADDR_EXPR)
1100 {
1101 tree base = get_base_address (TREE_OPERAND (*tp, 0));
1102 if (!base)
1103 obj = *tp;
1104 else if (TREE_CODE (base) != MEM_REF)
1105 obj = fold_convert (ptr_type_node, build_fold_addr_expr (base));
1106 }
1107 else if (code == SSA_NAME && POINTER_TYPE_P (TREE_TYPE (*tp)))
1108 obj = fold_convert (ptr_type_node, *tp);
1109
1110 if (!obj)
1111 {
1112 if (!EXPR_P (*tp))
1113 *walk_subtrees = 0;
1114
1115 return NULL_TREE;
1116 }
1117 /* Record special node for multiple base objects and stop. */
1118 if (*static_cast<tree *> (wdata))
1119 {
1120 *static_cast<tree *> (wdata) = integer_zero_node;
1121 return integer_zero_node;
1122 }
1123 /* Record the base object and continue looking. */
1124 *static_cast<tree *> (wdata) = obj;
1125 return NULL_TREE;
1126 }
1127
1128	/* Returns a memory object to which EXPR points, with caching.  Return NULL
1129	   if we are able to determine that it does not point to any such object; as a
1130	   special case, return integer_zero_node if EXPR contains multiple base objects.  */
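/* Illustrative examples (the names are made up): for an EXPR like &a[i] the
   recorded base object is roughly (void *) &a; for an EXPR based on a pointer
   SSA name p_1 it is (void *) p_1; and if the walk over EXPR finds more than
   one distinct base object, integer_zero_node is recorded instead.  */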
1131
1132 static tree
1133 determine_base_object (struct ivopts_data *data, tree expr)
1134 {
1135 tree *slot, obj = NULL_TREE;
1136 if (data->base_object_map)
1137 {
1138 if ((slot = data->base_object_map->get(expr)) != NULL)
1139 return *slot;
1140 }
1141 else
1142 data->base_object_map = new hash_map<tree, tree>;
1143
1144 (void) walk_tree_without_duplicates (&expr, determine_base_object_1, &obj);
1145 data->base_object_map->put (expr, obj);
1146 return obj;
1147 }
1148
1149 /* Return true if address expression with non-DECL_P operand appears
1150 in EXPR. */
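/* E.g. (illustrative) &a[i] contains such an expression, because the
   ADDR_EXPR operand a[i] is an ARRAY_REF rather than a DECL, whereas a
   plain &a does not.  */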
1151
1152 static bool
1153 contain_complex_addr_expr (tree expr)
1154 {
1155 bool res = false;
1156
1157 STRIP_NOPS (expr);
1158 switch (TREE_CODE (expr))
1159 {
1160 case POINTER_PLUS_EXPR:
1161 case PLUS_EXPR:
1162 case MINUS_EXPR:
1163 res |= contain_complex_addr_expr (TREE_OPERAND (expr, 0));
1164 res |= contain_complex_addr_expr (TREE_OPERAND (expr, 1));
1165 break;
1166
1167 case ADDR_EXPR:
1168 return (!DECL_P (TREE_OPERAND (expr, 0)));
1169
1170 default:
1171 return false;
1172 }
1173
1174 return res;
1175 }
1176
1177 /* Allocates an induction variable with given initial value BASE and step STEP
1178 for loop LOOP. NO_OVERFLOW implies the iv doesn't overflow. */
1179
1180 static struct iv *
1181 alloc_iv (struct ivopts_data *data, tree base, tree step,
1182 bool no_overflow = false)
1183 {
1184 tree expr = base;
1185 struct iv *iv = (struct iv*) obstack_alloc (&data->iv_obstack,
1186 sizeof (struct iv));
1187 gcc_assert (step != NULL_TREE);
1188
1189 /* Lower address expression in base except ones with DECL_P as operand.
1190 By doing this:
1191 1) More accurate cost can be computed for address expressions;
1192 2) Duplicate candidates won't be created for bases in different
1193 forms, like &a[0] and &a. */
1194 STRIP_NOPS (expr);
1195 if ((TREE_CODE (expr) == ADDR_EXPR && !DECL_P (TREE_OPERAND (expr, 0)))
1196 || contain_complex_addr_expr (expr))
1197 {
1198 aff_tree comb;
1199 tree_to_aff_combination (expr, TREE_TYPE (expr), &comb);
1200 base = fold_convert (TREE_TYPE (base), aff_combination_to_tree (&comb));
1201 }
1202
1203 iv->base = base;
1204 iv->base_object = determine_base_object (data, base);
1205 iv->step = step;
1206 iv->biv_p = false;
1207 iv->nonlin_use = NULL;
1208 iv->ssa_name = NULL_TREE;
1209 if (!no_overflow
1210 && !iv_can_overflow_p (data->current_loop, TREE_TYPE (base),
1211 base, step))
1212 no_overflow = true;
1213 iv->no_overflow = no_overflow;
1214 iv->have_address_use = false;
1215
1216 return iv;
1217 }
1218
1219 /* Sets STEP and BASE for induction variable IV. NO_OVERFLOW implies the IV
1220 doesn't overflow. */
1221
1222 static void
1223 set_iv (struct ivopts_data *data, tree iv, tree base, tree step,
1224 bool no_overflow)
1225 {
1226 struct version_info *info = name_info (data, iv);
1227
1228 gcc_assert (!info->iv);
1229
1230 bitmap_set_bit (data->relevant, SSA_NAME_VERSION (iv));
1231 info->iv = alloc_iv (data, base, step, no_overflow);
1232 info->iv->ssa_name = iv;
1233 }
1234
1235 /* Finds induction variable declaration for VAR. */
1236
1237 static struct iv *
1238 get_iv (struct ivopts_data *data, tree var)
1239 {
1240 basic_block bb;
1241 tree type = TREE_TYPE (var);
1242
1243 if (!POINTER_TYPE_P (type)
1244 && !INTEGRAL_TYPE_P (type))
1245 return NULL;
1246
1247 if (!name_info (data, var)->iv)
1248 {
1249 bb = gimple_bb (SSA_NAME_DEF_STMT (var));
1250
1251 if (!bb
1252 || !flow_bb_inside_loop_p (data->current_loop, bb))
1253 {
1254 if (POINTER_TYPE_P (type))
1255 type = sizetype;
1256 set_iv (data, var, var, build_int_cst (type, 0), true);
1257 }
1258 }
1259
1260 return name_info (data, var)->iv;
1261 }
1262
1263 /* Return the first non-invariant ssa var found in EXPR. */
1264
1265 static tree
1266 extract_single_var_from_expr (tree expr)
1267 {
1268 int i, n;
1269 tree tmp;
1270 enum tree_code code;
1271
1272 if (!expr || is_gimple_min_invariant (expr))
1273 return NULL;
1274
1275 code = TREE_CODE (expr);
1276 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
1277 {
1278 n = TREE_OPERAND_LENGTH (expr);
1279 for (i = 0; i < n; i++)
1280 {
1281 tmp = extract_single_var_from_expr (TREE_OPERAND (expr, i));
1282
1283 if (tmp)
1284 return tmp;
1285 }
1286 }
1287 return (TREE_CODE (expr) == SSA_NAME) ? expr : NULL;
1288 }
1289
1290 /* Finds basic ivs. */
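/* For instance (the SSA names here are illustrative), given a loop header
   PHI i_1 = PHI <0(preheader), i_2(latch)> with latch increment
   i_2 = i_1 + 1, i_1 is recorded as an iv with base 0 and step 1;
   mark_bivs below then flags both i_1 and i_2 as bivs.  */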
1291
1292 static bool
1293 find_bivs (struct ivopts_data *data)
1294 {
1295 gphi *phi;
1296 affine_iv iv;
1297 tree step, type, base, stop;
1298 bool found = false;
1299 class loop *loop = data->current_loop;
1300 gphi_iterator psi;
1301
1302 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
1303 {
1304 phi = psi.phi ();
1305
1306 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (phi)))
1307 continue;
1308
1309 if (virtual_operand_p (PHI_RESULT (phi)))
1310 continue;
1311
1312 if (!simple_iv (loop, loop, PHI_RESULT (phi), &iv, true))
1313 continue;
1314
1315 if (integer_zerop (iv.step))
1316 continue;
1317
1318 step = iv.step;
1319 base = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
1320	      /* Stop expanding the iv base at the first ssa var referred to by the
1321	         iv step.  Ideally we should stop at any such ssa var, but since that
1322	         is expensive and such cases are unusual, we just do it for the first one.
1323	
1324	         See PR64705 for the rationale.  */
1325 stop = extract_single_var_from_expr (step);
1326 base = expand_simple_operations (base, stop);
1327 if (contains_abnormal_ssa_name_p (base)
1328 || contains_abnormal_ssa_name_p (step))
1329 continue;
1330
1331 type = TREE_TYPE (PHI_RESULT (phi));
1332 base = fold_convert (type, base);
1333 if (step)
1334 {
1335 if (POINTER_TYPE_P (type))
1336 step = convert_to_ptrofftype (step);
1337 else
1338 step = fold_convert (type, step);
1339 }
1340
1341 set_iv (data, PHI_RESULT (phi), base, step, iv.no_overflow);
1342 found = true;
1343 }
1344
1345 return found;
1346 }
1347
1348 /* Marks basic ivs. */
1349
1350 static void
1351 mark_bivs (struct ivopts_data *data)
1352 {
1353 gphi *phi;
1354 gimple *def;
1355 tree var;
1356 struct iv *iv, *incr_iv;
1357 class loop *loop = data->current_loop;
1358 basic_block incr_bb;
1359 gphi_iterator psi;
1360
1361 data->bivs_not_used_in_addr = 0;
1362 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
1363 {
1364 phi = psi.phi ();
1365
1366 iv = get_iv (data, PHI_RESULT (phi));
1367 if (!iv)
1368 continue;
1369
1370 var = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
1371 def = SSA_NAME_DEF_STMT (var);
1372	      /* Don't mark an iv peeled from another one as a biv.  */
1373 if (def
1374 && gimple_code (def) == GIMPLE_PHI
1375 && gimple_bb (def) == loop->header)
1376 continue;
1377
1378 incr_iv = get_iv (data, var);
1379 if (!incr_iv)
1380 continue;
1381
1382 /* If the increment is in the subloop, ignore it. */
1383 incr_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
1384 if (incr_bb->loop_father != data->current_loop
1385 || (incr_bb->flags & BB_IRREDUCIBLE_LOOP))
1386 continue;
1387
1388 iv->biv_p = true;
1389 incr_iv->biv_p = true;
1390 if (iv->no_overflow)
1391 data->bivs_not_used_in_addr++;
1392 if (incr_iv->no_overflow)
1393 data->bivs_not_used_in_addr++;
1394 }
1395 }
1396
1397 /* Checks whether STMT defines a linear induction variable and stores its
1398 parameters to IV. */
1399
1400 static bool
1401 find_givs_in_stmt_scev (struct ivopts_data *data, gimple *stmt, affine_iv *iv)
1402 {
1403 tree lhs, stop;
1404 class loop *loop = data->current_loop;
1405
1406 iv->base = NULL_TREE;
1407 iv->step = NULL_TREE;
1408
1409 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1410 return false;
1411
1412 lhs = gimple_assign_lhs (stmt);
1413 if (TREE_CODE (lhs) != SSA_NAME)
1414 return false;
1415
1416 if (!simple_iv (loop, loop_containing_stmt (stmt), lhs, iv, true))
1417 return false;
1418
1419	  /* Stop expanding the iv base at the first ssa var referred to by the iv step.
1420	     Ideally we should stop at any such ssa var, but since that is expensive
1421	     and such cases are unusual, we just do it for the first one.
1422	
1423	     See PR64705 for the rationale.  */
1424 stop = extract_single_var_from_expr (iv->step);
1425 iv->base = expand_simple_operations (iv->base, stop);
1426 if (contains_abnormal_ssa_name_p (iv->base)
1427 || contains_abnormal_ssa_name_p (iv->step))
1428 return false;
1429
1430 /* If STMT could throw, then do not consider STMT as defining a GIV.
1431 While this will suppress optimizations, we cannot safely delete this
1432 GIV and associated statements, even if it appears it is not used. */
1433 if (stmt_could_throw_p (cfun, stmt))
1434 return false;
1435
1436 return true;
1437 }
1438
1439 /* Finds general ivs in statement STMT. */
1440
1441 static void
1442 find_givs_in_stmt (struct ivopts_data *data, gimple *stmt)
1443 {
1444 affine_iv iv;
1445
1446 if (!find_givs_in_stmt_scev (data, stmt, &iv))
1447 return;
1448
1449 set_iv (data, gimple_assign_lhs (stmt), iv.base, iv.step, iv.no_overflow);
1450 }
1451
1452 /* Finds general ivs in basic block BB. */
1453
1454 static void
1455 find_givs_in_bb (struct ivopts_data *data, basic_block bb)
1456 {
1457 gimple_stmt_iterator bsi;
1458
1459 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1460 find_givs_in_stmt (data, gsi_stmt (bsi));
1461 }
1462
1463 /* Finds general ivs. */
1464
1465 static void
1466 find_givs (struct ivopts_data *data, basic_block *body)
1467 {
1468 class loop *loop = data->current_loop;
1469 unsigned i;
1470
1471 for (i = 0; i < loop->num_nodes; i++)
1472 find_givs_in_bb (data, body[i]);
1473 }
1474
1475 /* For each ssa name defined in LOOP determines whether it is an induction
1476 variable and if so, its initial value and step. */
1477
1478 static bool
1479 find_induction_variables (struct ivopts_data *data, basic_block *body)
1480 {
1481 unsigned i;
1482 bitmap_iterator bi;
1483
1484 if (!find_bivs (data))
1485 return false;
1486
1487 find_givs (data, body);
1488 mark_bivs (data);
1489
1490 if (dump_file && (dump_flags & TDF_DETAILS))
1491 {
1492 class tree_niter_desc *niter = niter_for_single_dom_exit (data);
1493
1494 if (niter)
1495 {
1496 fprintf (dump_file, " number of iterations ");
1497 print_generic_expr (dump_file, niter->niter, TDF_SLIM);
1498 if (!integer_zerop (niter->may_be_zero))
1499 {
1500 fprintf (dump_file, "; zero if ");
1501 print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
1502 }
1503 fprintf (dump_file, "\n");
1504 };
1505
1506 fprintf (dump_file, "\n<Induction Vars>:\n");
1507 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
1508 {
1509 struct version_info *info = ver_info (data, i);
1510 if (info->iv && info->iv->step && !integer_zerop (info->iv->step))
1511 dump_iv (dump_file, ver_info (data, i)->iv, true, 0);
1512 }
1513 }
1514
1515 return true;
1516 }
1517
1518 /* Records a use of TYPE at *USE_P in STMT whose value is IV in GROUP.
1519 For address type use, ADDR_BASE is the stripped IV base, ADDR_OFFSET
1520 is the const offset stripped from IV base and MEM_TYPE is the type
1521 of the memory being addressed. For uses of other types, ADDR_BASE
1522 and ADDR_OFFSET are zero by default and MEM_TYPE is NULL_TREE. */
1523
1524 static struct iv_use *
1525 record_use (struct iv_group *group, tree *use_p, struct iv *iv,
1526 gimple *stmt, enum use_type type, tree mem_type,
1527 tree addr_base, poly_uint64 addr_offset)
1528 {
1529 struct iv_use *use = XCNEW (struct iv_use);
1530
1531 use->id = group->vuses.length ();
1532 use->group_id = group->id;
1533 use->type = type;
1534 use->mem_type = mem_type;
1535 use->iv = iv;
1536 use->stmt = stmt;
1537 use->op_p = use_p;
1538 use->addr_base = addr_base;
1539 use->addr_offset = addr_offset;
1540
1541 group->vuses.safe_push (use);
1542 return use;
1543 }
1544
1545 /* Checks whether OP is a loop-level invariant and if so, records it.
1546 NONLINEAR_USE is true if the invariant is used in a way we do not
1547 handle specially. */
1548
1549 static void
1550 record_invariant (struct ivopts_data *data, tree op, bool nonlinear_use)
1551 {
1552 basic_block bb;
1553 struct version_info *info;
1554
1555 if (TREE_CODE (op) != SSA_NAME
1556 || virtual_operand_p (op))
1557 return;
1558
1559 bb = gimple_bb (SSA_NAME_DEF_STMT (op));
1560 if (bb
1561 && flow_bb_inside_loop_p (data->current_loop, bb))
1562 return;
1563
1564 info = name_info (data, op);
1565 info->name = op;
1566 info->has_nonlin_use |= nonlinear_use;
1567 if (!info->inv_id)
1568 info->inv_id = ++data->max_inv_var_id;
1569 bitmap_set_bit (data->relevant, SSA_NAME_VERSION (op));
1570 }
1571
1572 /* Record a group of TYPE. */
1573
1574 static struct iv_group *
1575 record_group (struct ivopts_data *data, enum use_type type)
1576 {
1577 struct iv_group *group = XCNEW (struct iv_group);
1578
1579 group->id = data->vgroups.length ();
1580 group->type = type;
1581 group->related_cands = BITMAP_ALLOC (NULL);
1582 group->vuses.create (1);
1583 group->doloop_p = false;
1584
1585 data->vgroups.safe_push (group);
1586 return group;
1587 }
1588
1589 /* Record a use of TYPE at *USE_P in STMT whose value is IV in a group.
1590 New group will be created if there is no existing group for the use.
1591 MEM_TYPE is the type of memory being addressed, or NULL if this
1592 isn't an address reference. */
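/* As an illustration (the array and names are invented), address type uses
   based on &a[i] and &a[i + 1] have the same base object, step and stripped
   base and differ only in the constant offset, so the second use joins the
   group created for the first; a use based on a different object such as
   &b[i] starts a new group.  */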
1593
1594 static struct iv_use *
1595 record_group_use (struct ivopts_data *data, tree *use_p,
1596 struct iv *iv, gimple *stmt, enum use_type type,
1597 tree mem_type)
1598 {
1599 tree addr_base = NULL;
1600 struct iv_group *group = NULL;
1601 poly_uint64 addr_offset = 0;
1602
1603	  /* A non-address type use is always recorded in a new group; for address type
	     uses we try to reuse an existing group first.  */
1604 if (address_p (type))
1605 {
1606 unsigned int i;
1607
1608 addr_base = strip_offset (iv->base, &addr_offset);
1609 for (i = 0; i < data->vgroups.length (); i++)
1610 {
1611 struct iv_use *use;
1612
1613 group = data->vgroups[i];
1614 use = group->vuses[0];
1615 if (!address_p (use->type))
1616 continue;
1617
1618 /* Check if it has the same stripped base and step. */
1619 if (operand_equal_p (iv->base_object, use->iv->base_object, 0)
1620 && operand_equal_p (iv->step, use->iv->step, 0)
1621 && operand_equal_p (addr_base, use->addr_base, 0))
1622 break;
1623 }
1624 if (i == data->vgroups.length ())
1625 group = NULL;
1626 }
1627
1628 if (!group)
1629 group = record_group (data, type);
1630
1631 return record_use (group, use_p, iv, stmt, type, mem_type,
1632 addr_base, addr_offset);
1633 }
1634
1635 /* Checks whether the use OP is interesting and if so, records it. */
1636
1637 static struct iv_use *
1638 find_interesting_uses_op (struct ivopts_data *data, tree op)
1639 {
1640 struct iv *iv;
1641 gimple *stmt;
1642 struct iv_use *use;
1643
1644 if (TREE_CODE (op) != SSA_NAME)
1645 return NULL;
1646
1647 iv = get_iv (data, op);
1648 if (!iv)
1649 return NULL;
1650
1651 if (iv->nonlin_use)
1652 {
1653 gcc_assert (iv->nonlin_use->type == USE_NONLINEAR_EXPR);
1654 return iv->nonlin_use;
1655 }
1656
1657 if (integer_zerop (iv->step))
1658 {
1659 record_invariant (data, op, true);
1660 return NULL;
1661 }
1662
1663 stmt = SSA_NAME_DEF_STMT (op);
1664 gcc_assert (gimple_code (stmt) == GIMPLE_PHI || is_gimple_assign (stmt));
1665
1666 use = record_group_use (data, NULL, iv, stmt, USE_NONLINEAR_EXPR, NULL_TREE);
1667 iv->nonlin_use = use;
1668 return use;
1669 }
1670
1671 /* Indicate how compare type iv_use can be handled. */
1672 enum comp_iv_rewrite
1673 {
1674 COMP_IV_NA,
1675 /* We may rewrite compare type iv_use by expressing value of the iv_use. */
1676 COMP_IV_EXPR,
1677 /* We may rewrite compare type iv_uses on both sides of comparison by
1678 expressing value of each iv_use. */
1679 COMP_IV_EXPR_2,
1680 /* We may rewrite compare type iv_use by expressing value of the iv_use
1681 or by eliminating it with other iv_cand. */
1682 COMP_IV_ELIM
1683 };
1684
1685	/* Given a condition in statement STMT, checks whether it is a compare
1686	   of an induction variable and an invariant.  If this is the case,
1687	   CONTROL_VAR is set to the location of the iv, BOUND to the location of
1688	   the invariant, IV_VAR and IV_BOUND are set to the corresponding
1689	   induction variable descriptions, and a comp_iv_rewrite value other than
1690	   COMP_IV_NA is returned.  Otherwise CONTROL_VAR and BOUND are set to the
1691	   arguments of the condition and COMP_IV_NA is returned.  */
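/* For example (illustrative SSA names), for a condition i_1 < n_2 where i_1
   is an iv with nonzero step and n_2 is loop invariant, *CONTROL_VAR ends up
   pointing to i_1, *BOUND to n_2, and COMP_IV_ELIM is returned; if both
   operands are ivs with nonzero step, COMP_IV_EXPR_2 is returned instead.  */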
1692
1693 static enum comp_iv_rewrite
1694 extract_cond_operands (struct ivopts_data *data, gimple *stmt,
1695 tree **control_var, tree **bound,
1696 struct iv **iv_var, struct iv **iv_bound)
1697 {
1698 /* The objects returned when COND has constant operands. */
1699 static struct iv const_iv;
1700 static tree zero;
1701 tree *op0 = &zero, *op1 = &zero;
1702 struct iv *iv0 = &const_iv, *iv1 = &const_iv;
1703 enum comp_iv_rewrite rewrite_type = COMP_IV_NA;
1704
1705 if (gimple_code (stmt) == GIMPLE_COND)
1706 {
1707 gcond *cond_stmt = as_a <gcond *> (stmt);
1708 op0 = gimple_cond_lhs_ptr (cond_stmt);
1709 op1 = gimple_cond_rhs_ptr (cond_stmt);
1710 }
1711 else
1712 {
1713 op0 = gimple_assign_rhs1_ptr (stmt);
1714 op1 = gimple_assign_rhs2_ptr (stmt);
1715 }
1716
1717 zero = integer_zero_node;
1718 const_iv.step = integer_zero_node;
1719
1720 if (TREE_CODE (*op0) == SSA_NAME)
1721 iv0 = get_iv (data, *op0);
1722 if (TREE_CODE (*op1) == SSA_NAME)
1723 iv1 = get_iv (data, *op1);
1724
1725	  /* If both sides of the comparison are IVs, we can express ivs on both ends.  */
1726 if (iv0 && iv1 && !integer_zerop (iv0->step) && !integer_zerop (iv1->step))
1727 {
1728 rewrite_type = COMP_IV_EXPR_2;
1729 goto end;
1730 }
1731
1732	  /* If neither side of the comparison is an IV.  */
1733 if ((!iv0 || integer_zerop (iv0->step))
1734 && (!iv1 || integer_zerop (iv1->step)))
1735 goto end;
1736
1737 /* Control variable may be on the other side. */
1738 if (!iv0 || integer_zerop (iv0->step))
1739 {
1740 std::swap (op0, op1);
1741 std::swap (iv0, iv1);
1742 }
1743 /* If one side is IV and the other side isn't loop invariant. */
1744 if (!iv1)
1745 rewrite_type = COMP_IV_EXPR;
1746 /* If one side is IV and the other side is loop invariant. */
1747 else if (!integer_zerop (iv0->step) && integer_zerop (iv1->step))
1748 rewrite_type = COMP_IV_ELIM;
1749
1750 end:
1751 if (control_var)
1752 *control_var = op0;
1753 if (iv_var)
1754 *iv_var = iv0;
1755 if (bound)
1756 *bound = op1;
1757 if (iv_bound)
1758 *iv_bound = iv1;
1759
1760 return rewrite_type;
1761 }
1762
1763 /* Checks whether the condition in STMT is interesting and if so,
1764 records it. */
1765
1766 static void
1767 find_interesting_uses_cond (struct ivopts_data *data, gimple *stmt)
1768 {
1769 tree *var_p, *bound_p;
1770 struct iv *var_iv, *bound_iv;
1771 enum comp_iv_rewrite ret;
1772
1773 ret = extract_cond_operands (data, stmt,
1774 &var_p, &bound_p, &var_iv, &bound_iv);
1775 if (ret == COMP_IV_NA)
1776 {
1777 find_interesting_uses_op (data, *var_p);
1778 find_interesting_uses_op (data, *bound_p);
1779 return;
1780 }
1781
1782 record_group_use (data, var_p, var_iv, stmt, USE_COMPARE, NULL_TREE);
1783 /* Record compare type iv_use for iv on the other side of comparison. */
1784 if (ret == COMP_IV_EXPR_2)
1785 record_group_use (data, bound_p, bound_iv, stmt, USE_COMPARE, NULL_TREE);
1786 }
1787
1788	/* Returns the outermost loop EXPR is obviously invariant in,
1789	   relative to the loop LOOP, i.e. all its operands are defined
1790 outside of the returned loop. Returns NULL if EXPR is not
1791 even obviously invariant in LOOP. */
1792
1793 class loop *
1794 outermost_invariant_loop_for_expr (class loop *loop, tree expr)
1795 {
1796 basic_block def_bb;
1797 unsigned i, len;
1798
1799 if (is_gimple_min_invariant (expr))
1800 return current_loops->tree_root;
1801
1802 if (TREE_CODE (expr) == SSA_NAME)
1803 {
1804 def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr));
1805 if (def_bb)
1806 {
1807 if (flow_bb_inside_loop_p (loop, def_bb))
1808 return NULL;
1809 return superloop_at_depth (loop,
1810 loop_depth (def_bb->loop_father) + 1);
1811 }
1812
1813 return current_loops->tree_root;
1814 }
1815
1816 if (!EXPR_P (expr))
1817 return NULL;
1818
1819 unsigned maxdepth = 0;
1820 len = TREE_OPERAND_LENGTH (expr);
1821 for (i = 0; i < len; i++)
1822 {
1823 class loop *ivloop;
1824 if (!TREE_OPERAND (expr, i))
1825 continue;
1826
1827 ivloop = outermost_invariant_loop_for_expr (loop, TREE_OPERAND (expr, i));
1828 if (!ivloop)
1829 return NULL;
1830 maxdepth = MAX (maxdepth, loop_depth (ivloop));
1831 }
1832
1833 return superloop_at_depth (loop, maxdepth);
1834 }
1835
1836 /* Returns true if expression EXPR is obviously invariant in LOOP,
1837 i.e. if all its operands are defined outside of the LOOP. LOOP
1838 should not be the function body. */
1839
1840 bool
1841 expr_invariant_in_loop_p (class loop *loop, tree expr)
1842 {
1843 basic_block def_bb;
1844 unsigned i, len;
1845
1846 gcc_assert (loop_depth (loop) > 0);
1847
1848 if (is_gimple_min_invariant (expr))
1849 return true;
1850
1851 if (TREE_CODE (expr) == SSA_NAME)
1852 {
1853 def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr));
1854 if (def_bb
1855 && flow_bb_inside_loop_p (loop, def_bb))
1856 return false;
1857
1858 return true;
1859 }
1860
1861 if (!EXPR_P (expr))
1862 return false;
1863
1864 len = TREE_OPERAND_LENGTH (expr);
1865 for (i = 0; i < len; i++)
1866 if (TREE_OPERAND (expr, i)
1867 && !expr_invariant_in_loop_p (loop, TREE_OPERAND (expr, i)))
1868 return false;
1869
1870 return true;
1871 }
1872
1873 /* Given expression EXPR which computes inductive values with respect
1874 to loop recorded in DATA, this function returns biv from which EXPR
1875 is derived by tracing definition chains of ssa variables in EXPR. */
1876
1877 static struct iv*
1878 find_deriving_biv_for_expr (struct ivopts_data *data, tree expr)
1879 {
1880 struct iv *iv;
1881 unsigned i, n;
1882 tree e2, e1;
1883 enum tree_code code;
1884 gimple *stmt;
1885
1886 if (expr == NULL_TREE)
1887 return NULL;
1888
1889 if (is_gimple_min_invariant (expr))
1890 return NULL;
1891
1892 code = TREE_CODE (expr);
1893 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
1894 {
1895 n = TREE_OPERAND_LENGTH (expr);
1896 for (i = 0; i < n; i++)
1897 {
1898 iv = find_deriving_biv_for_expr (data, TREE_OPERAND (expr, i));
1899 if (iv)
1900 return iv;
1901 }
1902 }
1903
1904 /* Stop if it's not ssa name. */
1905 if (code != SSA_NAME)
1906 return NULL;
1907
1908 iv = get_iv (data, expr);
1909 if (!iv || integer_zerop (iv->step))
1910 return NULL;
1911 else if (iv->biv_p)
1912 return iv;
1913
1914 stmt = SSA_NAME_DEF_STMT (expr);
1915 if (gphi *phi = dyn_cast <gphi *> (stmt))
1916 {
1917 ssa_op_iter iter;
1918 use_operand_p use_p;
1919 basic_block phi_bb = gimple_bb (phi);
1920
1921 /* Skip loop header PHI that doesn't define biv. */
1922 if (phi_bb->loop_father == data->current_loop)
1923 return NULL;
1924
1925 if (virtual_operand_p (gimple_phi_result (phi)))
1926 return NULL;
1927
1928 FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
1929 {
1930 tree use = USE_FROM_PTR (use_p);
1931 iv = find_deriving_biv_for_expr (data, use);
1932 if (iv)
1933 return iv;
1934 }
1935 return NULL;
1936 }
1937 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1938 return NULL;
1939
1940 e1 = gimple_assign_rhs1 (stmt);
1941 code = gimple_assign_rhs_code (stmt);
1942 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
1943 return find_deriving_biv_for_expr (data, e1);
1944
1945 switch (code)
1946 {
1947 case MULT_EXPR:
1948 case PLUS_EXPR:
1949 case MINUS_EXPR:
1950 case POINTER_PLUS_EXPR:
1951 /* Increments, decrements and multiplications by a constant
1952 are simple. */
1953 e2 = gimple_assign_rhs2 (stmt);
1954 iv = find_deriving_biv_for_expr (data, e2);
1955 if (iv)
1956 return iv;
1957 gcc_fallthrough ();
1958
1959 CASE_CONVERT:
1960 /* Casts are simple. */
1961 return find_deriving_biv_for_expr (data, e1);
1962
1963 default:
1964 break;
1965 }
1966
1967 return NULL;
1968 }
1969
1970 /* Record that BIV, together with any biv that is its predecessor or
1971 successor, is used in address type uses. */
1972
1973 static void
1974 record_biv_for_address_use (struct ivopts_data *data, struct iv *biv)
1975 {
1976 unsigned i;
1977 tree type, base_1, base_2;
1978 bitmap_iterator bi;
1979
1980 if (!biv || !biv->biv_p || integer_zerop (biv->step)
1981 || biv->have_address_use || !biv->no_overflow)
1982 return;
1983
1984 type = TREE_TYPE (biv->base);
1985 if (!INTEGRAL_TYPE_P (type))
1986 return;
1987
1988 biv->have_address_use = true;
1989 data->bivs_not_used_in_addr--;
1990 base_1 = fold_build2 (PLUS_EXPR, type, biv->base, biv->step);
1991 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
1992 {
1993 struct iv *iv = ver_info (data, i)->iv;
1994
1995 if (!iv || !iv->biv_p || integer_zerop (iv->step)
1996 || iv->have_address_use || !iv->no_overflow)
1997 continue;
1998
1999 if (type != TREE_TYPE (iv->base)
2000 || !INTEGRAL_TYPE_P (TREE_TYPE (iv->base)))
2001 continue;
2002
2003 if (!operand_equal_p (biv->step, iv->step, 0))
2004 continue;
2005
2006 base_2 = fold_build2 (PLUS_EXPR, type, iv->base, iv->step);
2007 if (operand_equal_p (base_1, iv->base, 0)
2008 || operand_equal_p (base_2, biv->base, 0))
2009 {
2010 iv->have_address_use = true;
2011 data->bivs_not_used_in_addr--;
2012 }
2013 }
2014 }
2015
2016 /* Accumulates the steps of indices into DATA and replaces their values with
2017 the initial ones. Returns false when the value of the index cannot be
2018 determined. Callback for for_each_index. */
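/* As a small illustration (hypothetical SSA names): for a reference a[i_1]
   with 4-byte array elements, where i_1 is an iv with base 0 and step 1,
   the callback replaces the index with 0 and adds 4 * 1 = 4 to the step
   accumulated in DATA.  */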
2019
2020 struct ifs_ivopts_data
2021 {
2022 struct ivopts_data *ivopts_data;
2023 gimple *stmt;
2024 tree step;
2025 };
2026
2027 static bool
2028 idx_find_step (tree base, tree *idx, void *data)
2029 {
2030 struct ifs_ivopts_data *dta = (struct ifs_ivopts_data *) data;
2031 struct iv *iv;
2032 bool use_overflow_semantics = false;
2033 tree step, iv_base, iv_step, lbound, off;
2034 class loop *loop = dta->ivopts_data->current_loop;
2035
2036 /* If base is a component ref, require that the offset of the reference
2037 be invariant. */
2038 if (TREE_CODE (base) == COMPONENT_REF)
2039 {
2040 off = component_ref_field_offset (base);
2041 return expr_invariant_in_loop_p (loop, off);
2042 }
2043
2044 /* If base is array, first check whether we will be able to move the
2045 reference out of the loop (in order to take its address in strength
2046 reduction). In order for this to work we need both lower bound
2047 and step to be loop invariants. */
2048 if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
2049 {
2050 /* Moreover, for a range, the size needs to be invariant as well. */
2051 if (TREE_CODE (base) == ARRAY_RANGE_REF
2052 && !expr_invariant_in_loop_p (loop, TYPE_SIZE (TREE_TYPE (base))))
2053 return false;
2054
2055 step = array_ref_element_size (base);
2056 lbound = array_ref_low_bound (base);
2057
2058 if (!expr_invariant_in_loop_p (loop, step)
2059 || !expr_invariant_in_loop_p (loop, lbound))
2060 return false;
2061 }
2062
2063 if (TREE_CODE (*idx) != SSA_NAME)
2064 return true;
2065
2066 iv = get_iv (dta->ivopts_data, *idx);
2067 if (!iv)
2068 return false;
2069
2070 /* XXX For a base of *D42 with iv->base being &x[0], this produces
2071 *&x[0], which is not folded and does not trigger the
2072 ARRAY_REF path below. */
2073 *idx = iv->base;
2074
2075 if (integer_zerop (iv->step))
2076 return true;
2077
2078 if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
2079 {
2080 step = array_ref_element_size (base);
2081
2082 /* We only handle addresses whose step is an integer constant. */
2083 if (TREE_CODE (step) != INTEGER_CST)
2084 return false;
2085 }
2086 else
2087 /* The step for pointer arithmetic is already 1 byte. */
2088 step = size_one_node;
2089
2090 iv_base = iv->base;
2091 iv_step = iv->step;
2092 if (iv->no_overflow && nowrap_type_p (TREE_TYPE (iv_step)))
2093 use_overflow_semantics = true;
2094
2095 if (!convert_affine_scev (dta->ivopts_data->current_loop,
2096 sizetype, &iv_base, &iv_step, dta->stmt,
2097 use_overflow_semantics))
2098 {
2099 /* The index might wrap. */
2100 return false;
2101 }
2102
2103 step = fold_build2 (MULT_EXPR, sizetype, step, iv_step);
2104 dta->step = fold_build2 (PLUS_EXPR, sizetype, dta->step, step);
2105
2106 if (dta->ivopts_data->bivs_not_used_in_addr)
2107 {
2108 if (!iv->biv_p)
2109 iv = find_deriving_biv_for_expr (dta->ivopts_data, iv->ssa_name);
2110
2111 record_biv_for_address_use (dta->ivopts_data, iv);
2112 }
2113 return true;
2114 }
2115
2116 /* Records use in index IDX. Callback for for_each_index. Ivopts data
2117 object is passed to it in DATA. */
2118
2119 static bool
2120 idx_record_use (tree base, tree *idx,
2121 void *vdata)
2122 {
2123 struct ivopts_data *data = (struct ivopts_data *) vdata;
2124 find_interesting_uses_op (data, *idx);
2125 if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
2126 {
2127 if (TREE_OPERAND (base, 2))
2128 find_interesting_uses_op (data, TREE_OPERAND (base, 2));
2129 if (TREE_OPERAND (base, 3))
2130 find_interesting_uses_op (data, TREE_OPERAND (base, 3));
2131 }
2132 return true;
2133 }
2134
2135 /* If we can prove that TOP = cst * BOT for some constant cst,
2136 store cst to MUL and return true. Otherwise return false.
2137 The returned value is always sign-extended, regardless of the
2138 signedness of TOP and BOT. */
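/* A couple of illustrative cases (hypothetical operands): for TOP = 12 and
   BOT = 4 the INTEGER_CST path gives *MUL = 3; for TOP = b_1 * 4 - b_1 and
   BOT = b_1 the recursion gives 4 and -1 for the two operands, so *MUL = 3
   as well; TOP = 14 with BOT = 4 fails because the division leaves a
   remainder.  */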
2139
2140 static bool
2141 constant_multiple_of (tree top, tree bot, widest_int *mul)
2142 {
2143 tree mby;
2144 enum tree_code code;
2145 unsigned precision = TYPE_PRECISION (TREE_TYPE (top));
2146 widest_int res, p0, p1;
2147
2148 STRIP_NOPS (top);
2149 STRIP_NOPS (bot);
2150
2151 if (operand_equal_p (top, bot, 0))
2152 {
2153 *mul = 1;
2154 return true;
2155 }
2156
2157 code = TREE_CODE (top);
2158 switch (code)
2159 {
2160 case MULT_EXPR:
2161 mby = TREE_OPERAND (top, 1);
2162 if (TREE_CODE (mby) != INTEGER_CST)
2163 return false;
2164
2165 if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &res))
2166 return false;
2167
2168 *mul = wi::sext (res * wi::to_widest (mby), precision);
2169 return true;
2170
2171 case PLUS_EXPR:
2172 case MINUS_EXPR:
2173 if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &p0)
2174 || !constant_multiple_of (TREE_OPERAND (top, 1), bot, &p1))
2175 return false;
2176
2177 if (code == MINUS_EXPR)
2178 p1 = -p1;
2179 *mul = wi::sext (p0 + p1, precision);
2180 return true;
2181
2182 case INTEGER_CST:
2183 if (TREE_CODE (bot) != INTEGER_CST)
2184 return false;
2185
2186 p0 = widest_int::from (wi::to_wide (top), SIGNED);
2187 p1 = widest_int::from (wi::to_wide (bot), SIGNED);
2188 if (p1 == 0)
2189 return false;
2190 *mul = wi::sext (wi::divmod_trunc (p0, p1, SIGNED, &res), precision);
2191 return res == 0;
2192
2193 default:
2194 if (POLY_INT_CST_P (top)
2195 && POLY_INT_CST_P (bot)
2196 && constant_multiple_p (wi::to_poly_widest (top),
2197 wi::to_poly_widest (bot), mul))
2198 return true;
2199
2200 return false;
2201 }
2202 }
2203
2204 /* Return true if memory reference REF with step STEP may be unaligned. */
2205
2206 static bool
2207 may_be_unaligned_p (tree ref, tree step)
2208 {
2209 /* TARGET_MEM_REFs are translated directly to valid MEMs on the target,
2210 thus they are not misaligned. */
2211 if (TREE_CODE (ref) == TARGET_MEM_REF)
2212 return false;
2213
2214 unsigned int align = TYPE_ALIGN (TREE_TYPE (ref));
2215 if (GET_MODE_ALIGNMENT (TYPE_MODE (TREE_TYPE (ref))) > align)
2216 align = GET_MODE_ALIGNMENT (TYPE_MODE (TREE_TYPE (ref)));
2217
2218 unsigned HOST_WIDE_INT bitpos;
2219 unsigned int ref_align;
2220 get_object_alignment_1 (ref, &ref_align, &bitpos);
2221 if (ref_align < align
2222 || (bitpos % align) != 0
2223 || (bitpos % BITS_PER_UNIT) != 0)
2224 return true;
2225
2226 unsigned int trailing_zeros = tree_ctz (step);
2227 if (trailing_zeros < HOST_BITS_PER_INT
2228 && (1U << trailing_zeros) * BITS_PER_UNIT < align)
2229 return true;
2230
2231 return false;
2232 }
2233
2234 /* Return true if EXPR may be non-addressable. */
2235
2236 bool
2237 may_be_nonaddressable_p (tree expr)
2238 {
2239 switch (TREE_CODE (expr))
2240 {
2241 case VAR_DECL:
2242 /* Check if it's a register variable. */
2243 return DECL_HARD_REGISTER (expr);
2244
2245 case TARGET_MEM_REF:
2246 /* TARGET_MEM_REFs are translated directly to valid MEMs on the
2247 target, thus they are always addressable. */
2248 return false;
2249
2250 case MEM_REF:
2251 /* Likewise for MEM_REFs, modulo the storage order. */
2252 return REF_REVERSE_STORAGE_ORDER (expr);
2253
2254 case BIT_FIELD_REF:
2255 if (REF_REVERSE_STORAGE_ORDER (expr))
2256 return true;
2257 return may_be_nonaddressable_p (TREE_OPERAND (expr, 0));
2258
2259 case COMPONENT_REF:
2260 if (TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_OPERAND (expr, 0))))
2261 return true;
2262 return DECL_NONADDRESSABLE_P (TREE_OPERAND (expr, 1))
2263 || may_be_nonaddressable_p (TREE_OPERAND (expr, 0));
2264
2265 case ARRAY_REF:
2266 case ARRAY_RANGE_REF:
2267 if (TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_OPERAND (expr, 0))))
2268 return true;
2269 return may_be_nonaddressable_p (TREE_OPERAND (expr, 0));
2270
2271 case VIEW_CONVERT_EXPR:
2272 /* This kind of view-conversions may wrap non-addressable objects
2273 and make them look addressable. After some processing the
2274 non-addressability may be uncovered again, causing ADDR_EXPRs
2275 of inappropriate objects to be built. */
2276 if (is_gimple_reg (TREE_OPERAND (expr, 0))
2277 || !is_gimple_addressable (TREE_OPERAND (expr, 0)))
2278 return true;
2279 return may_be_nonaddressable_p (TREE_OPERAND (expr, 0));
2280
2281 CASE_CONVERT:
2282 return true;
2283
2284 default:
2285 break;
2286 }
2287
2288 return false;
2289 }
2290
2291 /* Finds addresses in *OP_P inside STMT. */
2292
2293 static void
2294 find_interesting_uses_address (struct ivopts_data *data, gimple *stmt,
2295 tree *op_p)
2296 {
2297 tree base = *op_p, step = size_zero_node;
2298 struct iv *civ;
2299 struct ifs_ivopts_data ifs_ivopts_data;
2300
2301 /* Do not play with volatile memory references. A bit too conservative,
2302 perhaps, but safe. */
2303 if (gimple_has_volatile_ops (stmt))
2304 goto fail;
2305
2306 /* Ignore bitfields for now. Not really something terribly complicated
2307 to handle. TODO. */
2308 if (TREE_CODE (base) == BIT_FIELD_REF)
2309 goto fail;
2310
2311 base = unshare_expr (base);
2312
2313 if (TREE_CODE (base) == TARGET_MEM_REF)
2314 {
2315 tree type = build_pointer_type (TREE_TYPE (base));
2316 tree astep;
2317
2318 if (TMR_BASE (base)
2319 && TREE_CODE (TMR_BASE (base)) == SSA_NAME)
2320 {
2321 civ = get_iv (data, TMR_BASE (base));
2322 if (!civ)
2323 goto fail;
2324
2325 TMR_BASE (base) = civ->base;
2326 step = civ->step;
2327 }
2328 if (TMR_INDEX2 (base)
2329 && TREE_CODE (TMR_INDEX2 (base)) == SSA_NAME)
2330 {
2331 civ = get_iv (data, TMR_INDEX2 (base));
2332 if (!civ)
2333 goto fail;
2334
2335 TMR_INDEX2 (base) = civ->base;
2336 step = civ->step;
2337 }
2338 if (TMR_INDEX (base)
2339 && TREE_CODE (TMR_INDEX (base)) == SSA_NAME)
2340 {
2341 civ = get_iv (data, TMR_INDEX (base));
2342 if (!civ)
2343 goto fail;
2344
2345 TMR_INDEX (base) = civ->base;
2346 astep = civ->step;
2347
2348 if (astep)
2349 {
2350 if (TMR_STEP (base))
2351 astep = fold_build2 (MULT_EXPR, type, TMR_STEP (base), astep);
2352
2353 step = fold_build2 (PLUS_EXPR, type, step, astep);
2354 }
2355 }
2356
2357 if (integer_zerop (step))
2358 goto fail;
2359 base = tree_mem_ref_addr (type, base);
2360 }
2361 else
2362 {
2363 ifs_ivopts_data.ivopts_data = data;
2364 ifs_ivopts_data.stmt = stmt;
2365 ifs_ivopts_data.step = size_zero_node;
2366 if (!for_each_index (&base, idx_find_step, &ifs_ivopts_data)
2367 || integer_zerop (ifs_ivopts_data.step))
2368 goto fail;
2369 step = ifs_ivopts_data.step;
2370
2371 /* Check that the base expression is addressable. This needs
2372 to be done after substituting bases of IVs into it. */
2373 if (may_be_nonaddressable_p (base))
2374 goto fail;
2375
2376 /* Moreover, on strict alignment platforms, check that it is
2377 sufficiently aligned. */
2378 if (STRICT_ALIGNMENT && may_be_unaligned_p (base, step))
2379 goto fail;
2380
2381 base = build_fold_addr_expr (base);
2382
2383 /* Substituting bases of IVs into the base expression might
2384 have caused folding opportunities. */
2385 if (TREE_CODE (base) == ADDR_EXPR)
2386 {
2387 tree *ref = &TREE_OPERAND (base, 0);
2388 while (handled_component_p (*ref))
2389 ref = &TREE_OPERAND (*ref, 0);
2390 if (TREE_CODE (*ref) == MEM_REF)
2391 {
2392 tree tem = fold_binary (MEM_REF, TREE_TYPE (*ref),
2393 TREE_OPERAND (*ref, 0),
2394 TREE_OPERAND (*ref, 1));
2395 if (tem)
2396 *ref = tem;
2397 }
2398 }
2399 }
2400
2401 civ = alloc_iv (data, base, step);
2402 /* Fail if base object of this memory reference is unknown. */
2403 if (civ->base_object == NULL_TREE)
2404 goto fail;
2405
2406 record_group_use (data, op_p, civ, stmt, USE_REF_ADDRESS, TREE_TYPE (*op_p));
2407 return;
2408
2409 fail:
2410 for_each_index (op_p, idx_record_use, data);
2411 }
2412
2413 /* Finds and records invariants used in STMT. */
2414
2415 static void
2416 find_invariants_stmt (struct ivopts_data *data, gimple *stmt)
2417 {
2418 ssa_op_iter iter;
2419 use_operand_p use_p;
2420 tree op;
2421
2422 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
2423 {
2424 op = USE_FROM_PTR (use_p);
2425 record_invariant (data, op, false);
2426 }
2427 }
2428
2429 /* CALL calls an internal function. If operand *OP_P will become an
2430 address when the call is expanded, return the type of the memory
2431 being addressed, otherwise return null. */
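/* For example, for a hypothetical call

     _1 = IFN_MASK_LOAD (p_2, align_3, mask_4);

   passing the address of argument 0 as *OP_P returns the type of _1,
   i.e. the type of the memory being loaded.  */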
2432
2433 static tree
2434 get_mem_type_for_internal_fn (gcall *call, tree *op_p)
2435 {
2436 switch (gimple_call_internal_fn (call))
2437 {
2438 case IFN_MASK_LOAD:
2439 case IFN_MASK_LOAD_LANES:
2440 case IFN_LEN_LOAD:
2441 if (op_p == gimple_call_arg_ptr (call, 0))
2442 return TREE_TYPE (gimple_call_lhs (call));
2443 return NULL_TREE;
2444
2445 case IFN_MASK_STORE:
2446 case IFN_MASK_STORE_LANES:
2447 case IFN_LEN_STORE:
2448 if (op_p == gimple_call_arg_ptr (call, 0))
2449 return TREE_TYPE (gimple_call_arg (call, 3));
2450 return NULL_TREE;
2451
2452 default:
2453 return NULL_TREE;
2454 }
2455 }
2456
2457 /* IV is a (non-address) iv that describes operand *OP_P of STMT.
2458 Return true if the operand will become an address when STMT
2459 is expanded and record the associated address use if so. */
2460
2461 static bool
2462 find_address_like_use (struct ivopts_data *data, gimple *stmt, tree *op_p,
2463 struct iv *iv)
2464 {
2465 /* Fail if base object of this memory reference is unknown. */
2466 if (iv->base_object == NULL_TREE)
2467 return false;
2468
2469 tree mem_type = NULL_TREE;
2470 if (gcall *call = dyn_cast <gcall *> (stmt))
2471 if (gimple_call_internal_p (call))
2472 mem_type = get_mem_type_for_internal_fn (call, op_p);
2473 if (mem_type)
2474 {
2475 iv = alloc_iv (data, iv->base, iv->step);
2476 record_group_use (data, op_p, iv, stmt, USE_PTR_ADDRESS, mem_type);
2477 return true;
2478 }
2479 return false;
2480 }
2481
2482 /* Finds interesting uses of induction variables in the statement STMT. */
2483
2484 static void
2485 find_interesting_uses_stmt (struct ivopts_data *data, gimple *stmt)
2486 {
2487 struct iv *iv;
2488 tree op, *lhs, *rhs;
2489 ssa_op_iter iter;
2490 use_operand_p use_p;
2491 enum tree_code code;
2492
2493 find_invariants_stmt (data, stmt);
2494
2495 if (gimple_code (stmt) == GIMPLE_COND)
2496 {
2497 find_interesting_uses_cond (data, stmt);
2498 return;
2499 }
2500
2501 if (is_gimple_assign (stmt))
2502 {
2503 lhs = gimple_assign_lhs_ptr (stmt);
2504 rhs = gimple_assign_rhs1_ptr (stmt);
2505
2506 if (TREE_CODE (*lhs) == SSA_NAME)
2507 {
2508 /* If the statement defines an induction variable, the uses are not
2509 interesting by themselves. */
2510
2511 iv = get_iv (data, *lhs);
2512
2513 if (iv && !integer_zerop (iv->step))
2514 return;
2515 }
2516
2517 code = gimple_assign_rhs_code (stmt);
2518 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
2519 && (REFERENCE_CLASS_P (*rhs)
2520 || is_gimple_val (*rhs)))
2521 {
2522 if (REFERENCE_CLASS_P (*rhs))
2523 find_interesting_uses_address (data, stmt, rhs);
2524 else
2525 find_interesting_uses_op (data, *rhs);
2526
2527 if (REFERENCE_CLASS_P (*lhs))
2528 find_interesting_uses_address (data, stmt, lhs);
2529 return;
2530 }
2531 else if (TREE_CODE_CLASS (code) == tcc_comparison)
2532 {
2533 find_interesting_uses_cond (data, stmt);
2534 return;
2535 }
2536
2537 /* TODO -- we should also handle address uses of type
2538
2539 memory = call (whatever);
2540
2541 and
2542
2543 call (memory). */
2544 }
2545
2546 if (gimple_code (stmt) == GIMPLE_PHI
2547 && gimple_bb (stmt) == data->current_loop->header)
2548 {
2549 iv = get_iv (data, PHI_RESULT (stmt));
2550
2551 if (iv && !integer_zerop (iv->step))
2552 return;
2553 }
2554
2555 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
2556 {
2557 op = USE_FROM_PTR (use_p);
2558
2559 if (TREE_CODE (op) != SSA_NAME)
2560 continue;
2561
2562 iv = get_iv (data, op);
2563 if (!iv)
2564 continue;
2565
2566 if (!find_address_like_use (data, stmt, use_p->use, iv))
2567 find_interesting_uses_op (data, op);
2568 }
2569 }
2570
2571 /* Finds interesting uses of induction variables outside of loops
2572 on loop exit edge EXIT. */
2573
2574 static void
2575 find_interesting_uses_outside (struct ivopts_data *data, edge exit)
2576 {
2577 gphi *phi;
2578 gphi_iterator psi;
2579 tree def;
2580
2581 for (psi = gsi_start_phis (exit->dest); !gsi_end_p (psi); gsi_next (&psi))
2582 {
2583 phi = psi.phi ();
2584 def = PHI_ARG_DEF_FROM_EDGE (phi, exit);
2585 if (!virtual_operand_p (def))
2586 find_interesting_uses_op (data, def);
2587 }
2588 }
2589
2590 /* Return TRUE if OFFSET is within the range of [base + offset] addressing
2591 mode for memory reference represented by USE. */
2592
2593 static GTY (()) vec<rtx, va_gc> *addr_list;
2594
2595 static bool
2596 addr_offset_valid_p (struct iv_use *use, poly_int64 offset)
2597 {
2598 rtx reg, addr;
2599 unsigned list_index;
2600 addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (use->iv->base));
2601 machine_mode addr_mode, mem_mode = TYPE_MODE (use->mem_type);
2602
2603 list_index = (unsigned) as * MAX_MACHINE_MODE + (unsigned) mem_mode;
2604 if (list_index >= vec_safe_length (addr_list))
2605 vec_safe_grow_cleared (addr_list, list_index + MAX_MACHINE_MODE, true);
2606
2607 addr = (*addr_list)[list_index];
2608 if (!addr)
2609 {
2610 addr_mode = targetm.addr_space.address_mode (as);
2611 reg = gen_raw_REG (addr_mode, LAST_VIRTUAL_REGISTER + 1);
2612 addr = gen_rtx_fmt_ee (PLUS, addr_mode, reg, NULL_RTX);
2613 (*addr_list)[list_index] = addr;
2614 }
2615 else
2616 addr_mode = GET_MODE (addr);
2617
2618 XEXP (addr, 1) = gen_int_mode (offset, addr_mode);
2619 return (memory_address_addr_space_p (mem_mode, addr, as));
2620 }
2621
2622 /* Comparison function to sort a group's uses in ascending order of addr_offset. */
2623
2624 static int
2625 group_compare_offset (const void *a, const void *b)
2626 {
2627 const struct iv_use *const *u1 = (const struct iv_use *const *) a;
2628 const struct iv_use *const *u2 = (const struct iv_use *const *) b;
2629
2630 return compare_sizes_for_sort ((*u1)->addr_offset, (*u2)->addr_offset);
2631 }
2632
2633 /* Check if small groups should be split. Return true if no group
2634 contains more than two uses with distinct addr_offsets. Return
2635 false otherwise. We want to split such groups because:
2636 
2637 1) Small groups don't have much benefit and may interfere with
2638 general candidate selection.
2639 2) The problem size with only small groups is usually small, and the
2640 general algorithm can handle it well.
2641 
2642 TODO -- The above claim may not hold when we want to merge memory
2643 accesses with consecutive addresses. */
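/* For instance, if every address group looks like { MEM[p_1], MEM[p_1 + 4] }
   (a hypothetical layout with at most two distinct addr_offsets per group),
   this returns true, and split_address_groups below then places each
   distinct offset in a group of its own.  */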
2644
2645 static bool
2646 split_small_address_groups_p (struct ivopts_data *data)
2647 {
2648 unsigned int i, j, distinct = 1;
2649 struct iv_use *pre;
2650 struct iv_group *group;
2651
2652 for (i = 0; i < data->vgroups.length (); i++)
2653 {
2654 group = data->vgroups[i];
2655 if (group->vuses.length () == 1)
2656 continue;
2657
2658 gcc_assert (address_p (group->type));
2659 if (group->vuses.length () == 2)
2660 {
2661 if (compare_sizes_for_sort (group->vuses[0]->addr_offset,
2662 group->vuses[1]->addr_offset) > 0)
2663 std::swap (group->vuses[0], group->vuses[1]);
2664 }
2665 else
2666 group->vuses.qsort (group_compare_offset);
2667
2668 if (distinct > 2)
2669 continue;
2670
2671 distinct = 1;
2672 for (pre = group->vuses[0], j = 1; j < group->vuses.length (); j++)
2673 {
2674 if (maybe_ne (group->vuses[j]->addr_offset, pre->addr_offset))
2675 {
2676 pre = group->vuses[j];
2677 distinct++;
2678 }
2679
2680 if (distinct > 2)
2681 break;
2682 }
2683 }
2684
2685 return (distinct <= 2);
2686 }
2687
2688 /* For each group of address type uses, this function further groups
2689 these uses according to the maximum offset supported by target's
2690 [base + offset] addressing mode. */
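/* A sketch on a hypothetical target whose [base + offset] form accepts
   offsets up to 4095: assuming the groups are not split unconditionally,
   a group with uses at offsets 0, 8 and 8192 from the same base keeps the
   first two uses together (offset 8 fits) and moves the use at offset 8192
   into a new group.  */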
2691
2692 static void
2693 split_address_groups (struct ivopts_data *data)
2694 {
2695 unsigned int i, j;
2696 /* Whether to split all groups unconditionally. */
2697 bool split_p = split_small_address_groups_p (data);
2698
2699 for (i = 0; i < data->vgroups.length (); i++)
2700 {
2701 struct iv_group *new_group = NULL;
2702 struct iv_group *group = data->vgroups[i];
2703 struct iv_use *use = group->vuses[0];
2704
2705 use->id = 0;
2706 use->group_id = group->id;
2707 if (group->vuses.length () == 1)
2708 continue;
2709
2710 gcc_assert (address_p (use->type));
2711
2712 for (j = 1; j < group->vuses.length ();)
2713 {
2714 struct iv_use *next = group->vuses[j];
2715 poly_int64 offset = next->addr_offset - use->addr_offset;
2716
2717 /* Split the group if asked to, or if the offset against the first
2718 use can't fit in the offset part of the addressing mode. IV uses
2719 having the same offset are still kept in one group. */
2720 if (maybe_ne (offset, 0)
2721 && (split_p || !addr_offset_valid_p (use, offset)))
2722 {
2723 if (!new_group)
2724 new_group = record_group (data, group->type);
2725 group->vuses.ordered_remove (j);
2726 new_group->vuses.safe_push (next);
2727 continue;
2728 }
2729
2730 next->id = j;
2731 next->group_id = group->id;
2732 j++;
2733 }
2734 }
2735 }
2736
2737 /* Finds uses of the induction variables that are interesting. */
2738
2739 static void
2740 find_interesting_uses (struct ivopts_data *data, basic_block *body)
2741 {
2742 basic_block bb;
2743 gimple_stmt_iterator bsi;
2744 unsigned i;
2745 edge e;
2746
2747 for (i = 0; i < data->current_loop->num_nodes; i++)
2748 {
2749 edge_iterator ei;
2750 bb = body[i];
2751
2752 FOR_EACH_EDGE (e, ei, bb->succs)
2753 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
2754 && !flow_bb_inside_loop_p (data->current_loop, e->dest))
2755 find_interesting_uses_outside (data, e);
2756
2757 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2758 find_interesting_uses_stmt (data, gsi_stmt (bsi));
2759 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2760 if (!is_gimple_debug (gsi_stmt (bsi)))
2761 find_interesting_uses_stmt (data, gsi_stmt (bsi));
2762 }
2763
2764 split_address_groups (data);
2765
2766 if (dump_file && (dump_flags & TDF_DETAILS))
2767 {
2768 fprintf (dump_file, "\n<IV Groups>:\n");
2769 dump_groups (dump_file, data);
2770 fprintf (dump_file, "\n");
2771 }
2772 }
2773
2774 /* Strips constant offsets from EXPR and stores them to OFFSET. If INSIDE_ADDR
2775 is true, assume we are inside an address. If TOP_COMPREF is true, assume
2776 we are at the top-level of the processed address. */
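/* For example (hypothetical operands): for &a[i_1 + 3] with 4-byte array
   elements, the core expression &a[i_1] is returned and 12 is stored to
   *OFFSET.  */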
2777
2778 static tree
2779 strip_offset_1 (tree expr, bool inside_addr, bool top_compref,
2780 poly_int64 *offset)
2781 {
2782 tree op0 = NULL_TREE, op1 = NULL_TREE, tmp, step;
2783 enum tree_code code;
2784 tree type, orig_type = TREE_TYPE (expr);
2785 poly_int64 off0, off1;
2786 HOST_WIDE_INT st;
2787 tree orig_expr = expr;
2788
2789 STRIP_NOPS (expr);
2790
2791 type = TREE_TYPE (expr);
2792 code = TREE_CODE (expr);
2793 *offset = 0;
2794
2795 switch (code)
2796 {
2797 case POINTER_PLUS_EXPR:
2798 case PLUS_EXPR:
2799 case MINUS_EXPR:
2800 op0 = TREE_OPERAND (expr, 0);
2801 op1 = TREE_OPERAND (expr, 1);
2802
2803 op0 = strip_offset_1 (op0, false, false, &off0);
2804 op1 = strip_offset_1 (op1, false, false, &off1);
2805
2806 *offset = (code == MINUS_EXPR ? off0 - off1 : off0 + off1);
2807 if (op0 == TREE_OPERAND (expr, 0)
2808 && op1 == TREE_OPERAND (expr, 1))
2809 return orig_expr;
2810
2811 if (integer_zerop (op1))
2812 expr = op0;
2813 else if (integer_zerop (op0))
2814 {
2815 if (code == MINUS_EXPR)
2816 expr = fold_build1 (NEGATE_EXPR, type, op1);
2817 else
2818 expr = op1;
2819 }
2820 else
2821 expr = fold_build2 (code, type, op0, op1);
2822
2823 return fold_convert (orig_type, expr);
2824
2825 case MULT_EXPR:
2826 op1 = TREE_OPERAND (expr, 1);
2827 if (!cst_and_fits_in_hwi (op1))
2828 return orig_expr;
2829
2830 op0 = TREE_OPERAND (expr, 0);
2831 op0 = strip_offset_1 (op0, false, false, &off0);
2832 if (op0 == TREE_OPERAND (expr, 0))
2833 return orig_expr;
2834
2835 *offset = off0 * int_cst_value (op1);
2836 if (integer_zerop (op0))
2837 expr = op0;
2838 else
2839 expr = fold_build2 (MULT_EXPR, type, op0, op1);
2840
2841 return fold_convert (orig_type, expr);
2842
2843 case ARRAY_REF:
2844 case ARRAY_RANGE_REF:
2845 if (!inside_addr)
2846 return orig_expr;
2847
2848 step = array_ref_element_size (expr);
2849 if (!cst_and_fits_in_hwi (step))
2850 break;
2851
2852 st = int_cst_value (step);
2853 op1 = TREE_OPERAND (expr, 1);
2854 op1 = strip_offset_1 (op1, false, false, &off1);
2855 *offset = off1 * st;
2856
2857 if (top_compref
2858 && integer_zerop (op1))
2859 {
2860 /* Strip the component reference completely. */
2861 op0 = TREE_OPERAND (expr, 0);
2862 op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
2863 *offset += off0;
2864 return op0;
2865 }
2866 break;
2867
2868 case COMPONENT_REF:
2869 {
2870 tree field;
2871
2872 if (!inside_addr)
2873 return orig_expr;
2874
2875 tmp = component_ref_field_offset (expr);
2876 field = TREE_OPERAND (expr, 1);
2877 if (top_compref
2878 && cst_and_fits_in_hwi (tmp)
2879 && cst_and_fits_in_hwi (DECL_FIELD_BIT_OFFSET (field)))
2880 {
2881 HOST_WIDE_INT boffset, abs_off;
2882
2883 /* Strip the component reference completely. */
2884 op0 = TREE_OPERAND (expr, 0);
2885 op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
2886 boffset = int_cst_value (DECL_FIELD_BIT_OFFSET (field));
2887 abs_off = abs_hwi (boffset) / BITS_PER_UNIT;
2888 if (boffset < 0)
2889 abs_off = -abs_off;
2890
2891 *offset = off0 + int_cst_value (tmp) + abs_off;
2892 return op0;
2893 }
2894 }
2895 break;
2896
2897 case ADDR_EXPR:
2898 op0 = TREE_OPERAND (expr, 0);
2899 op0 = strip_offset_1 (op0, true, true, &off0);
2900 *offset += off0;
2901
2902 if (op0 == TREE_OPERAND (expr, 0))
2903 return orig_expr;
2904
2905 expr = build_fold_addr_expr (op0);
2906 return fold_convert (orig_type, expr);
2907
2908 case MEM_REF:
2909 /* ??? Offset operand? */
2910 inside_addr = false;
2911 break;
2912
2913 default:
2914 if (ptrdiff_tree_p (expr, offset) && maybe_ne (*offset, 0))
2915 return build_int_cst (orig_type, 0);
2916 return orig_expr;
2917 }
2918
2919 /* Default handling of expressions for which we want to recurse into
2920 the first operand. */
2921 op0 = TREE_OPERAND (expr, 0);
2922 op0 = strip_offset_1 (op0, inside_addr, false, &off0);
2923 *offset += off0;
2924
2925 if (op0 == TREE_OPERAND (expr, 0)
2926 && (!op1 || op1 == TREE_OPERAND (expr, 1)))
2927 return orig_expr;
2928
2929 expr = copy_node (expr);
2930 TREE_OPERAND (expr, 0) = op0;
2931 if (op1)
2932 TREE_OPERAND (expr, 1) = op1;
2933
2934 /* Inside address, we might strip the top level component references,
2935 thus changing type of the expression. Handling of ADDR_EXPR
2936 will fix that. */
2937 expr = fold_convert (orig_type, expr);
2938
2939 return expr;
2940 }
2941
2942 /* Strips constant offsets from EXPR and stores them to OFFSET. */
2943
2944 tree
2945 strip_offset (tree expr, poly_uint64_pod *offset)
2946 {
2947 poly_int64 off;
2948 tree core = strip_offset_1 (expr, false, false, &off);
2949 *offset = off;
2950 return core;
2951 }
2952
2953 /* Returns variant of TYPE that can be used as base for different uses.
2954 We return unsigned type with the same precision, which avoids problems
2955 with overflows. */
2956
2957 static tree
2958 generic_type_for (tree type)
2959 {
2960 if (POINTER_TYPE_P (type))
2961 return unsigned_type_for (type);
2962
2963 if (TYPE_UNSIGNED (type))
2964 return type;
2965
2966 return unsigned_type_for (type);
2967 }
2968
2969 /* Private data for walk_tree. */
2970
2971 struct walk_tree_data
2972 {
2973 bitmap *inv_vars;
2974 struct ivopts_data *idata;
2975 };
2976
2977 /* Callback function for walk_tree, it records invariants and symbol
2978 reference in *EXPR_P. DATA is the structure storing result info. */
2979
2980 static tree
2981 find_inv_vars_cb (tree *expr_p, int *ws ATTRIBUTE_UNUSED, void *data)
2982 {
2983 tree op = *expr_p;
2984 struct version_info *info;
2985 struct walk_tree_data *wdata = (struct walk_tree_data*) data;
2986
2987 if (TREE_CODE (op) != SSA_NAME)
2988 return NULL_TREE;
2989
2990 info = name_info (wdata->idata, op);
2991 /* Because we expand simple operations when finding IVs, a loop invariant
2992 variable that isn't referred to by the original loop could be used now.
2993 Record such invariant variables here. */
2994 if (!info->iv)
2995 {
2996 struct ivopts_data *idata = wdata->idata;
2997 basic_block bb = gimple_bb (SSA_NAME_DEF_STMT (op));
2998
2999 if (!bb || !flow_bb_inside_loop_p (idata->current_loop, bb))
3000 {
3001 tree steptype = TREE_TYPE (op);
3002 if (POINTER_TYPE_P (steptype))
3003 steptype = sizetype;
3004 set_iv (idata, op, op, build_int_cst (steptype, 0), true);
3005 record_invariant (idata, op, false);
3006 }
3007 }
3008 if (!info->inv_id || info->has_nonlin_use)
3009 return NULL_TREE;
3010
3011 if (!*wdata->inv_vars)
3012 *wdata->inv_vars = BITMAP_ALLOC (NULL);
3013 bitmap_set_bit (*wdata->inv_vars, info->inv_id);
3014
3015 return NULL_TREE;
3016 }
3017
3018 /* Records invariants in *EXPR_P. INV_VARS is the bitmap in which we should
3019 store them. */
3020
3021 static inline void
3022 find_inv_vars (struct ivopts_data *data, tree *expr_p, bitmap *inv_vars)
3023 {
3024 struct walk_tree_data wdata;
3025
3026 if (!inv_vars)
3027 return;
3028
3029 wdata.idata = data;
3030 wdata.inv_vars = inv_vars;
3031 walk_tree (expr_p, find_inv_vars_cb, &wdata, NULL);
3032 }
3033
3034 /* Get the entry from the invariant expr hash table for INV_EXPR. A new
3035 entry is recorded if it doesn't exist yet. Given the two exprs below:
3036 inv_expr + cst1, inv_expr + cst2
3037 it's hard to decide whether the constant part should be stripped
3038 or not. We choose not to strip it, for the following reasons:
3039 1) We would need to count the ADD cost for the constant part if it's
3040 stripped, which isn't always trivial where this function is called.
3041 2) Stripping the constant away may conflict with the subsequent loop
3042 invariant hoisting pass.
3043 3) Not stripping the constant away results in more invariant exprs,
3044 which usually leads to decisions preferring lower register pressure. */
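/* So, as a hypothetical example, inv_1 + 4 and inv_1 + 8 are recorded as two
   separate entries, while a plain SSA name or a constant is not recorded at
   all and NULL is returned for it.  */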
3045
3046 static iv_inv_expr_ent *
3047 get_loop_invariant_expr (struct ivopts_data *data, tree inv_expr)
3048 {
3049 STRIP_NOPS (inv_expr);
3050
3051 if (poly_int_tree_p (inv_expr)
3052 || TREE_CODE (inv_expr) == SSA_NAME)
3053 return NULL;
3054
3055 /* Don't strip constant part away as we used to. */
3056
3057 /* Stores EXPR in DATA->inv_expr_tab, return pointer to iv_inv_expr_ent. */
3058 struct iv_inv_expr_ent ent;
3059 ent.expr = inv_expr;
3060 ent.hash = iterative_hash_expr (inv_expr, 0);
3061 struct iv_inv_expr_ent **slot = data->inv_expr_tab->find_slot (&ent, INSERT);
3062
3063 if (!*slot)
3064 {
3065 *slot = XNEW (struct iv_inv_expr_ent);
3066 (*slot)->expr = inv_expr;
3067 (*slot)->hash = ent.hash;
3068 (*slot)->id = ++data->max_inv_expr_id;
3069 }
3070
3071 return *slot;
3072 }
3073
3074 /* Return TRUE iff VAR is marked as maybe-undefined. See
3075 mark_ssa_maybe_undefs. */
3076
3077 static inline bool
3078 ssa_name_maybe_undef_p (tree var)
3079 {
3080 gcc_checking_assert (TREE_CODE (var) == SSA_NAME);
3081 return TREE_VISITED (var);
3082 }
3083
3084 /* Set (or clear, depending on VALUE) VAR's maybe-undefined mark. */
3085
3086 static inline void
3087 ssa_name_set_maybe_undef (tree var, bool value = true)
3088 {
3089 gcc_checking_assert (TREE_CODE (var) == SSA_NAME);
3090 TREE_VISITED (var) = value;
3091 }
3092
3093 /* Return TRUE iff there are any non-PHI uses of VAR that dominate the
3094 end of BB. If we return TRUE and BB is a loop header, then VAR will
3095 be assumed to be defined within the loop, even if it is marked as
3096 maybe-undefined. */
3097
3098 static inline bool
3099 ssa_name_any_use_dominates_bb_p (tree var, basic_block bb)
3100 {
3101 imm_use_iterator iter;
3102 use_operand_p use_p;
3103 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
3104 {
3105 if (is_a <gphi *> (USE_STMT (use_p))
3106 || is_gimple_debug (USE_STMT (use_p)))
3107 continue;
3108 basic_block dombb = gimple_bb (USE_STMT (use_p));
3109 if (dominated_by_p (CDI_DOMINATORS, bb, dombb))
3110 return true;
3111 }
3112
3113 return false;
3114 }
3115
3116 /* Mark as maybe_undef any SSA_NAMEs that are unsuitable as ivopts
3117 candidates for potentially involving undefined behavior. */
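/* A sketch of the situation this guards against (hypothetical code):

     int x;
     if (c_1)
       x = f ();
     # x_3 = PHI <x_2(then), x_4(D)(else)>

   x_4(D) is undefined and gets marked, and since no non-PHI use of x_4(D)
   dominates the else predecessor, x_3 is marked maybe-undef as well, so
   new candidates are not built from expressions containing it.  */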
3118
3119 static void
3120 mark_ssa_maybe_undefs (void)
3121 {
3122 auto_vec<tree> queue;
3123
3124 /* Scan all SSA_NAMEs, marking the definitely-undefined ones as
3125 maybe-undefined and queuing them for propagation, while clearing
3126 the mark on others. */
3127 unsigned int i;
3128 tree var;
3129 FOR_EACH_SSA_NAME (i, var, cfun)
3130 {
3131 if (SSA_NAME_IS_VIRTUAL_OPERAND (var)
3132 || !ssa_undefined_value_p (var, false))
3133 ssa_name_set_maybe_undef (var, false);
3134 else
3135 {
3136 ssa_name_set_maybe_undef (var);
3137 queue.safe_push (var);
3138 if (dump_file && (dump_flags & TDF_DETAILS))
3139 fprintf (dump_file, "marking _%i as maybe-undef\n",
3140 SSA_NAME_VERSION (var));
3141 }
3142 }
3143
3144 /* Now propagate maybe-undefined from a DEF to any other PHI that
3145 uses it, as long as there isn't any intervening use of DEF. */
3146 while (!queue.is_empty ())
3147 {
3148 var = queue.pop ();
3149 imm_use_iterator iter;
3150 use_operand_p use_p;
3151 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
3152 {
3153 /* Any uses of VAR that aren't PHI args imply VAR must be
3154 defined, otherwise undefined behavior would have been
3155 definitely invoked. Only PHI args may hold
3156 maybe-undefined values without invoking undefined
3157 behavior for that reason alone. */
3158 if (!is_a <gphi *> (USE_STMT (use_p)))
3159 continue;
3160 gphi *phi = as_a <gphi *> (USE_STMT (use_p));
3161
3162 tree def = gimple_phi_result (phi);
3163 if (ssa_name_maybe_undef_p (def))
3164 continue;
3165
3166 /* Look for any use of the maybe-undef SSA_NAME that
3167 dominates the predecessor block through which it reaches
3168 the PHI arg in which it is mentioned. Such a use means we
3169 can assume the SSA_NAME is defined on that path, so we
3170 only mark a PHI result as maybe-undef if no such
3171 dominating use is found. */
3172 int idx = phi_arg_index_from_use (use_p);
3173 basic_block bb = gimple_phi_arg_edge (phi, idx)->src;
3174 if (ssa_name_any_use_dominates_bb_p (var, bb))
3175 continue;
3176
3177 ssa_name_set_maybe_undef (def);
3178 queue.safe_push (def);
3179 if (dump_file && (dump_flags & TDF_DETAILS))
3180 fprintf (dump_file, "marking _%i as maybe-undef because of _%i\n",
3181 SSA_NAME_VERSION (def), SSA_NAME_VERSION (var));
3182 }
3183 }
3184 }
3185
3186 /* Return *TP if it is an SSA_NAME marked with TREE_VISITED, i.e., one that
3187 is unsuitable as an ivopts candidate for potentially involving undefined
3188 behavior. */
3189
3190 static tree
3191 find_ssa_undef (tree *tp, int *walk_subtrees, void *bb_)
3192 {
3193 basic_block bb = (basic_block) bb_;
3194 if (TREE_CODE (*tp) == SSA_NAME
3195 && ssa_name_maybe_undef_p (*tp)
3196 && !ssa_name_any_use_dominates_bb_p (*tp, bb))
3197 return *tp;
3198 if (!EXPR_P (*tp))
3199 *walk_subtrees = 0;
3200 return NULL;
3201 }
3202
3203 /* Adds a candidate BASE + STEP * i. The important field is set to IMPORTANT
3204 and the position to POS. If USE is not NULL, the candidate is set as
3205 related to it. BASE and STEP must both be non-NULL (see the assertion
3206 below). */
3207
3208 static struct iv_cand *
3209 add_candidate_1 (struct ivopts_data *data, tree base, tree step, bool important,
3210 enum iv_position pos, struct iv_use *use,
3211 gimple *incremented_at, struct iv *orig_iv = NULL,
3212 bool doloop = false)
3213 {
3214 unsigned i;
3215 struct iv_cand *cand = NULL;
3216 tree type, orig_type;
3217
3218 gcc_assert (base && step);
3219
3220 /* -fkeep-gc-roots-live means that we have to keep a real pointer
3221 live, but the ivopts code may replace a real pointer with one
3222 pointing before or after the memory block that is then adjusted
3223 into the memory block during the loop. FIXME: It would likely be
3224 better to actually force the pointer live and still use ivopts;
3225 for example, it would be enough to write the pointer into memory
3226 and keep it there until after the loop. */
3227 if (flag_keep_gc_roots_live && POINTER_TYPE_P (TREE_TYPE (base)))
3228 return NULL;
3229
3230 /* If BASE contains undefined SSA names make sure we only record
3231 the original IV. */
3232 bool involves_undefs = false;
3233 if (walk_tree (&base, find_ssa_undef, data->current_loop->header, NULL))
3234 {
3235 if (pos != IP_ORIGINAL)
3236 return NULL;
3237 important = false;
3238 involves_undefs = true;
3239 }
3240
3241 /* For non-original variables, make sure their values are computed in a type
3242 that does not invoke undefined behavior on overflows (since in general,
3243 we cannot prove that these induction variables are non-wrapping). */
3244 if (pos != IP_ORIGINAL)
3245 {
3246 orig_type = TREE_TYPE (base);
3247 type = generic_type_for (orig_type);
3248 if (type != orig_type)
3249 {
3250 base = fold_convert (type, base);
3251 step = fold_convert (type, step);
3252 }
3253 }
3254
3255 for (i = 0; i < data->vcands.length (); i++)
3256 {
3257 cand = data->vcands[i];
3258
3259 if (cand->pos != pos)
3260 continue;
3261
3262 if (cand->incremented_at != incremented_at
3263 || ((pos == IP_AFTER_USE || pos == IP_BEFORE_USE)
3264 && cand->ainc_use != use))
3265 continue;
3266
3267 if (operand_equal_p (base, cand->iv->base, 0)
3268 && operand_equal_p (step, cand->iv->step, 0)
3269 && (TYPE_PRECISION (TREE_TYPE (base))
3270 == TYPE_PRECISION (TREE_TYPE (cand->iv->base))))
3271 break;
3272 }
3273
3274 if (i == data->vcands.length ())
3275 {
3276 cand = XCNEW (struct iv_cand);
3277 cand->id = i;
3278 cand->iv = alloc_iv (data, base, step);
3279 cand->pos = pos;
3280 if (pos != IP_ORIGINAL)
3281 {
3282 if (doloop)
3283 cand->var_before = create_tmp_var_raw (TREE_TYPE (base), "doloop");
3284 else
3285 cand->var_before = create_tmp_var_raw (TREE_TYPE (base), "ivtmp");
3286 cand->var_after = cand->var_before;
3287 }
3288 cand->important = important;
3289 cand->involves_undefs = involves_undefs;
3290 cand->incremented_at = incremented_at;
3291 cand->doloop_p = doloop;
3292 data->vcands.safe_push (cand);
3293
3294 if (!poly_int_tree_p (step))
3295 {
3296 find_inv_vars (data, &step, &cand->inv_vars);
3297
3298 iv_inv_expr_ent *inv_expr = get_loop_invariant_expr (data, step);
3299 /* Share bitmap between inv_vars and inv_exprs for cand. */
3300 if (inv_expr != NULL)
3301 {
3302 cand->inv_exprs = cand->inv_vars;
3303 cand->inv_vars = NULL;
3304 if (cand->inv_exprs)
3305 bitmap_clear (cand->inv_exprs);
3306 else
3307 cand->inv_exprs = BITMAP_ALLOC (NULL);
3308
3309 bitmap_set_bit (cand->inv_exprs, inv_expr->id);
3310 }
3311 }
3312
3313 if (pos == IP_AFTER_USE || pos == IP_BEFORE_USE)
3314 cand->ainc_use = use;
3315 else
3316 cand->ainc_use = NULL;
3317
3318 cand->orig_iv = orig_iv;
3319 if (dump_file && (dump_flags & TDF_DETAILS))
3320 dump_cand (dump_file, cand);
3321 }
3322
3323 cand->important |= important;
3324 cand->doloop_p |= doloop;
3325
3326 /* Relate candidate to the group for which it is added. */
3327 if (use)
3328 bitmap_set_bit (data->vgroups[use->group_id]->related_cands, i);
3329
3330 return cand;
3331 }
3332
3333 /* Returns true if incrementing the induction variable at the end of LOOP
3334 is allowed.
3335 
3336 The purpose is to avoid splitting the latch edge with a biv increment, thus
3337 creating a jump, possibly confusing other optimization passes and leaving
3338 less freedom to the scheduler. So we allow IP_END only if IP_NORMAL is not
3339 available (so we do not have a better alternative), or if the latch edge
3340 is already nonempty. */
3341
3342 static bool
3343 allow_ip_end_pos_p (class loop *loop)
3344 {
3345 if (!ip_normal_pos (loop))
3346 return true;
3347
3348 if (!empty_block_p (ip_end_pos (loop)))
3349 return true;
3350
3351 return false;
3352 }
3353
3354 /* If possible, adds autoincrement candidates BASE + STEP * i based on use USE.
3355 Important field is set to IMPORTANT. */
3356
3357 static void
3358 add_autoinc_candidates (struct ivopts_data *data, tree base, tree step,
3359 bool important, struct iv_use *use)
3360 {
3361 basic_block use_bb = gimple_bb (use->stmt);
3362 machine_mode mem_mode;
3363 unsigned HOST_WIDE_INT cstepi;
3364
3365 /* If we insert the increment in any position other than the standard
3366 ones, we must ensure that it is incremented once per iteration.
3367 It must not be in an inner nested loop, or one side of an if
3368 statement. */
3369 if (use_bb->loop_father != data->current_loop
3370 || !dominated_by_p (CDI_DOMINATORS, data->current_loop->latch, use_bb)
3371 || stmt_can_throw_internal (cfun, use->stmt)
3372 || !cst_and_fits_in_hwi (step))
3373 return;
3374
3375 cstepi = int_cst_value (step);
3376
3377 mem_mode = TYPE_MODE (use->mem_type);
3378 if (((USE_LOAD_PRE_INCREMENT (mem_mode)
3379 || USE_STORE_PRE_INCREMENT (mem_mode))
3380 && known_eq (GET_MODE_SIZE (mem_mode), cstepi))
3381 || ((USE_LOAD_PRE_DECREMENT (mem_mode)
3382 || USE_STORE_PRE_DECREMENT (mem_mode))
3383 && known_eq (GET_MODE_SIZE (mem_mode), -cstepi)))
3384 {
3385 enum tree_code code = MINUS_EXPR;
3386 tree new_base;
3387 tree new_step = step;
3388
3389 if (POINTER_TYPE_P (TREE_TYPE (base)))
3390 {
3391 new_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
3392 code = POINTER_PLUS_EXPR;
3393 }
3394 else
3395 new_step = fold_convert (TREE_TYPE (base), new_step);
3396 new_base = fold_build2 (code, TREE_TYPE (base), base, new_step);
3397 add_candidate_1 (data, new_base, step, important, IP_BEFORE_USE, use,
3398 use->stmt);
3399 }
3400 if (((USE_LOAD_POST_INCREMENT (mem_mode)
3401 || USE_STORE_POST_INCREMENT (mem_mode))
3402 && known_eq (GET_MODE_SIZE (mem_mode), cstepi))
3403 || ((USE_LOAD_POST_DECREMENT (mem_mode)
3404 || USE_STORE_POST_DECREMENT (mem_mode))
3405 && known_eq (GET_MODE_SIZE (mem_mode), -cstepi)))
3406 {
3407 add_candidate_1 (data, base, step, important, IP_AFTER_USE, use,
3408 use->stmt);
3409 }
3410 }
3411
3412 /* Adds a candidate BASE + STEP * i. Important field is set to IMPORTANT and
3413 position to POS. If USE is not NULL, the candidate is set as related to
3414 it. The candidate computation is scheduled before exit condition and at
3415 the end of loop. */
3416
3417 static void
3418 add_candidate (struct ivopts_data *data, tree base, tree step, bool important,
3419 struct iv_use *use, struct iv *orig_iv = NULL,
3420 bool doloop = false)
3421 {
3422 if (ip_normal_pos (data->current_loop))
3423 add_candidate_1 (data, base, step, important, IP_NORMAL, use, NULL, orig_iv,
3424 doloop);
3425 /* Exclude doloop candidate here since it requires decrement then comparison
3426 and jump, the IP_END position doesn't match. */
3427 if (!doloop && ip_end_pos (data->current_loop)
3428 && allow_ip_end_pos_p (data->current_loop))
3429 add_candidate_1 (data, base, step, important, IP_END, use, NULL, orig_iv);
3430 }
3431
3432 /* Adds standard iv candidates. */
3433
3434 static void
3435 add_standard_iv_candidates (struct ivopts_data *data)
3436 {
3437 add_candidate (data, integer_zero_node, integer_one_node, true, NULL);
3438
3439 /* The same for a double-integer type if it is still fast enough. */
3440 if (TYPE_PRECISION
3441 (long_integer_type_node) > TYPE_PRECISION (integer_type_node)
3442 && TYPE_PRECISION (long_integer_type_node) <= BITS_PER_WORD)
3443 add_candidate (data, build_int_cst (long_integer_type_node, 0),
3444 build_int_cst (long_integer_type_node, 1), true, NULL);
3445
3446 /* The same for a double-integer type if it is still fast enough. */
3447 if (TYPE_PRECISION
3448 (long_long_integer_type_node) > TYPE_PRECISION (long_integer_type_node)
3449 && TYPE_PRECISION (long_long_integer_type_node) <= BITS_PER_WORD)
3450 add_candidate (data, build_int_cst (long_long_integer_type_node, 0),
3451 build_int_cst (long_long_integer_type_node, 1), true, NULL);
3452 }
3453
3454
3455 /* Adds candidates based on the old induction variable IV. */
3456
3457 static void
3458 add_iv_candidate_for_biv (struct ivopts_data *data, struct iv *iv)
3459 {
3460 gimple *phi;
3461 tree def;
3462 struct iv_cand *cand;
3463
3464 /* Check if this biv is used in address type use. */
3465 if (iv->no_overflow && iv->have_address_use
3466 && INTEGRAL_TYPE_P (TREE_TYPE (iv->base))
3467 && TYPE_PRECISION (TREE_TYPE (iv->base)) < TYPE_PRECISION (sizetype))
3468 {
3469 tree base = fold_convert (sizetype, iv->base);
3470 tree step = fold_convert (sizetype, iv->step);
3471
3472 /* Add iv cand of same precision as index part in TARGET_MEM_REF. */
3473 add_candidate (data, base, step, true, NULL, iv);
3474 /* Add iv cand of the original type only if it has nonlinear use. */
3475 if (iv->nonlin_use)
3476 add_candidate (data, iv->base, iv->step, true, NULL);
3477 }
3478 else
3479 add_candidate (data, iv->base, iv->step, true, NULL);
3480
3481 /* The same, but with initial value zero. */
3482 if (POINTER_TYPE_P (TREE_TYPE (iv->base)))
3483 add_candidate (data, size_int (0), iv->step, true, NULL);
3484 else
3485 add_candidate (data, build_int_cst (TREE_TYPE (iv->base), 0),
3486 iv->step, true, NULL);
3487
3488 phi = SSA_NAME_DEF_STMT (iv->ssa_name);
3489 if (gimple_code (phi) == GIMPLE_PHI)
3490 {
3491 /* Additionally record the possibility of leaving the original iv
3492 untouched. */
3493 def = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (data->current_loop));
3494 /* Don't add candidate if it's from another PHI node because
3495 it's an affine iv appearing in the form of PEELED_CHREC. */
3496 phi = SSA_NAME_DEF_STMT (def);
3497 if (gimple_code (phi) != GIMPLE_PHI)
3498 {
3499 cand = add_candidate_1 (data,
3500 iv->base, iv->step, true, IP_ORIGINAL, NULL,
3501 SSA_NAME_DEF_STMT (def));
3502 if (cand)
3503 {
3504 cand->var_before = iv->ssa_name;
3505 cand->var_after = def;
3506 }
3507 }
3508 else
3509 gcc_assert (gimple_bb (phi) == data->current_loop->header);
3510 }
3511 }
3512
3513 /* Adds candidates based on the old induction variables. */
3514
3515 static void
3516 add_iv_candidate_for_bivs (struct ivopts_data *data)
3517 {
3518 unsigned i;
3519 struct iv *iv;
3520 bitmap_iterator bi;
3521
3522 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
3523 {
3524 iv = ver_info (data, i)->iv;
3525 if (iv && iv->biv_p && !integer_zerop (iv->step))
3526 add_iv_candidate_for_biv (data, iv);
3527 }
3528 }
3529
3530 /* Record common candidate {BASE, STEP} derived from USE in hashtable. */
3531
3532 static void
3533 record_common_cand (struct ivopts_data *data, tree base,
3534 tree step, struct iv_use *use)
3535 {
3536 class iv_common_cand ent;
3537 class iv_common_cand **slot;
3538
3539 ent.base = base;
3540 ent.step = step;
3541 ent.hash = iterative_hash_expr (base, 0);
3542 ent.hash = iterative_hash_expr (step, ent.hash);
3543
3544 slot = data->iv_common_cand_tab->find_slot (&ent, INSERT);
3545 if (*slot == NULL)
3546 {
3547 *slot = new iv_common_cand ();
3548 (*slot)->base = base;
3549 (*slot)->step = step;
3550 (*slot)->uses.create (8);
3551 (*slot)->hash = ent.hash;
3552 data->iv_common_cands.safe_push ((*slot));
3553 }
3554
3555 gcc_assert (use != NULL);
3556 (*slot)->uses.safe_push (use);
3557 return;
3558 }
3559
3560 /* Comparison function used to sort common candidates. */
3561
3562 static int
3563 common_cand_cmp (const void *p1, const void *p2)
3564 {
3565 unsigned n1, n2;
3566 const class iv_common_cand *const *const ccand1
3567 = (const class iv_common_cand *const *)p1;
3568 const class iv_common_cand *const *const ccand2
3569 = (const class iv_common_cand *const *)p2;
3570
3571 n1 = (*ccand1)->uses.length ();
3572 n2 = (*ccand2)->uses.length ();
3573 return n2 - n1;
3574 }
3575
3576 /* Adds IV candidates based on the common candidates recorded. */
3577
3578 static void
3579 add_iv_candidate_derived_from_uses (struct ivopts_data *data)
3580 {
3581 unsigned i, j;
3582 struct iv_cand *cand_1, *cand_2;
3583
3584 data->iv_common_cands.qsort (common_cand_cmp);
3585 for (i = 0; i < data->iv_common_cands.length (); i++)
3586 {
3587 class iv_common_cand *ptr = data->iv_common_cands[i];
3588
3589 /* Only add IV candidate if it's derived from multiple uses. */
3590 if (ptr->uses.length () <= 1)
3591 break;
3592
3593 cand_1 = NULL;
3594 cand_2 = NULL;
3595 if (ip_normal_pos (data->current_loop))
3596 cand_1 = add_candidate_1 (data, ptr->base, ptr->step,
3597 false, IP_NORMAL, NULL, NULL);
3598
3599 if (ip_end_pos (data->current_loop)
3600 && allow_ip_end_pos_p (data->current_loop))
3601 cand_2 = add_candidate_1 (data, ptr->base, ptr->step,
3602 false, IP_END, NULL, NULL);
3603
3604 /* Bind deriving uses and the new candidates. */
3605 for (j = 0; j < ptr->uses.length (); j++)
3606 {
3607 struct iv_group *group = data->vgroups[ptr->uses[j]->group_id];
3608 if (cand_1)
3609 bitmap_set_bit (group->related_cands, cand_1->id);
3610 if (cand_2)
3611 bitmap_set_bit (group->related_cands, cand_2->id);
3612 }
3613 }
3614
3615 /* Release data since it is useless from this point. */
3616 data->iv_common_cand_tab->empty ();
3617 data->iv_common_cands.truncate (0);
3618 }
3619
3620 /* Adds candidates based on the value of USE's iv. */
3621
3622 static void
3623 add_iv_candidate_for_use (struct ivopts_data *data, struct iv_use *use)
3624 {
3625 poly_uint64 offset;
3626 tree base;
3627 struct iv *iv = use->iv;
3628 tree basetype = TREE_TYPE (iv->base);
3629
3630 /* Don't add a candidate for an iv_use whose type is neither integer nor
3631 pointer, or doesn't have mode precision. Instead, add a candidate for the
3632 corresponding scev in an unsigned type with the same precision. See PR93674. */
3633 if ((TREE_CODE (basetype) != INTEGER_TYPE && !POINTER_TYPE_P (basetype))
3634 || !type_has_mode_precision_p (basetype))
3635 {
3636 basetype = lang_hooks.types.type_for_mode (TYPE_MODE (basetype),
3637 TYPE_UNSIGNED (basetype));
3638 add_candidate (data, fold_convert (basetype, iv->base),
3639 fold_convert (basetype, iv->step), false, NULL);
3640 return;
3641 }
3642
3643 add_candidate (data, iv->base, iv->step, false, use);
3644
3645 /* Record common candidate for use in case it can be shared by others. */
3646 record_common_cand (data, iv->base, iv->step, use);
3647
3648 /* Record common candidate with initial value zero. */
3649 basetype = TREE_TYPE (iv->base);
3650 if (POINTER_TYPE_P (basetype))
3651 basetype = sizetype;
3652 record_common_cand (data, build_int_cst (basetype, 0), iv->step, use);
3653
3654 /* Compare the cost of an address with an unscaled index with the cost of
3655 an address with a scaled index and add candidate if useful. */
3656 poly_int64 step;
3657 if (use != NULL
3658 && poly_int_tree_p (iv->step, &step)
3659 && address_p (use->type))
3660 {
3661 poly_int64 new_step;
3662 unsigned int fact = preferred_mem_scale_factor
3663 (use->iv->base,
3664 TYPE_MODE (use->mem_type),
3665 optimize_loop_for_speed_p (data->current_loop));
3666
3667 if (fact != 1
3668 && multiple_p (step, fact, &new_step))
3669 add_candidate (data, size_int (0),
3670 wide_int_to_tree (sizetype, new_step),
3671 true, NULL);
3672 }
3673
3674 /* Record a common candidate with the constant offset stripped from the base.
3675 As with the use itself, we also add a candidate directly for it. */
3676 base = strip_offset (iv->base, &offset);
3677 if (maybe_ne (offset, 0U) || base != iv->base)
3678 {
3679 record_common_cand (data, base, iv->step, use);
3680 add_candidate (data, base, iv->step, false, use);
3681 }
3682
3683 /* Record common candidate with base_object removed in base. */
3684 base = iv->base;
3685 STRIP_NOPS (base);
3686 if (iv->base_object != NULL && TREE_CODE (base) == POINTER_PLUS_EXPR)
3687 {
3688 tree step = iv->step;
3689
3690 STRIP_NOPS (step);
3691 base = TREE_OPERAND (base, 1);
3692 step = fold_convert (sizetype, step);
3693 record_common_cand (data, base, step, use);
3694 /* Also record common candidate with offset stripped. */
3695 base = strip_offset (base, &offset);
3696 if (maybe_ne (offset, 0U))
3697 record_common_cand (data, base, step, use);
3698 }
3699
3700 /* Finally, add auto-increment candidates. Make such variables
3701 important since other iv uses with the same base object may be
3702 based on them. */
3703 if (use != NULL && address_p (use->type))
3704 add_autoinc_candidates (data, iv->base, iv->step, true, use);
3705 }
3706
3707 /* Adds candidates based on the uses. */
3708
3709 static void
3710 add_iv_candidate_for_groups (struct ivopts_data *data)
3711 {
3712 unsigned i;
3713
3714 /* Only add candidate for the first use in group. */
3715 for (i = 0; i < data->vgroups.length (); i++)
3716 {
3717 struct iv_group *group = data->vgroups[i];
3718
3719 gcc_assert (group->vuses[0] != NULL);
3720 add_iv_candidate_for_use (data, group->vuses[0]);
3721 }
3722 add_iv_candidate_derived_from_uses (data);
3723 }
3724
3725 /* Record important candidates and add them to related_cands bitmaps. */
3726
3727 static void
3728 record_important_candidates (struct ivopts_data *data)
3729 {
3730 unsigned i;
3731 struct iv_group *group;
3732
3733 for (i = 0; i < data->vcands.length (); i++)
3734 {
3735 struct iv_cand *cand = data->vcands[i];
3736
3737 if (cand->important)
3738 bitmap_set_bit (data->important_candidates, i);
3739 }
3740
3741 data->consider_all_candidates = (data->vcands.length ()
3742 <= CONSIDER_ALL_CANDIDATES_BOUND);
3743
3744 /* Add important candidates to groups' related_cands bitmaps. */
3745 for (i = 0; i < data->vgroups.length (); i++)
3746 {
3747 group = data->vgroups[i];
3748 bitmap_ior_into (group->related_cands, data->important_candidates);
3749 }
3750 }
3751
3752 /* Allocates the data structure mapping the (group, candidate) pairs to costs.
3753 If consider_all_candidates is true, we use an array with an entry for every
3754 candidate; otherwise each group gets a small map over its related candidates. */
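/* E.g., with consider_all_candidates false and a group related to five
   candidates (a hypothetical count), the map is sized to 8, so the probing
   in set_group_iv_cost and get_group_iv_cost can start at
   cand->id & (8 - 1).  */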
3755
3756 static void
3757 alloc_use_cost_map (struct ivopts_data *data)
3758 {
3759 unsigned i, size, s;
3760
3761 for (i = 0; i < data->vgroups.length (); i++)
3762 {
3763 struct iv_group *group = data->vgroups[i];
3764
3765 if (data->consider_all_candidates)
3766 size = data->vcands.length ();
3767 else
3768 {
3769 s = bitmap_count_bits (group->related_cands);
3770
3771 /* Round up to a power of two, so that taking the modulo by it is fast. */
3772 size = s ? (1 << ceil_log2 (s)) : 1;
3773 }
3774
3775 group->n_map_members = size;
3776 group->cost_map = XCNEWVEC (class cost_pair, size);
3777 }
3778 }
3779
3780 /* Sets the cost of the (GROUP, CAND) pair to COST and records that it depends
3781 on the invariants INV_VARS, that the value used in expressing it is VALUE,
3782 and that in case of iv elimination the comparison operator is COMP. */
3783
3784 static void
3785 set_group_iv_cost (struct ivopts_data *data,
3786 struct iv_group *group, struct iv_cand *cand,
3787 comp_cost cost, bitmap inv_vars, tree value,
3788 enum tree_code comp, bitmap inv_exprs)
3789 {
3790 unsigned i, s;
3791
3792 if (cost.infinite_cost_p ())
3793 {
3794 BITMAP_FREE (inv_vars);
3795 BITMAP_FREE (inv_exprs);
3796 return;
3797 }
3798
3799 if (data->consider_all_candidates)
3800 {
3801 group->cost_map[cand->id].cand = cand;
3802 group->cost_map[cand->id].cost = cost;
3803 group->cost_map[cand->id].inv_vars = inv_vars;
3804 group->cost_map[cand->id].inv_exprs = inv_exprs;
3805 group->cost_map[cand->id].value = value;
3806 group->cost_map[cand->id].comp = comp;
3807 return;
3808 }
3809
3810 /* n_map_members is a power of two, so this computes modulo. */
3811 s = cand->id & (group->n_map_members - 1);
3812 for (i = s; i < group->n_map_members; i++)
3813 if (!group->cost_map[i].cand)
3814 goto found;
3815 for (i = 0; i < s; i++)
3816 if (!group->cost_map[i].cand)
3817 goto found;
3818
3819 gcc_unreachable ();
3820
3821 found:
3822 group->cost_map[i].cand = cand;
3823 group->cost_map[i].cost = cost;
3824 group->cost_map[i].inv_vars = inv_vars;
3825 group->cost_map[i].inv_exprs = inv_exprs;
3826 group->cost_map[i].value = value;
3827 group->cost_map[i].comp = comp;
3828 }
3829
3830 /* Gets cost of (GROUP, CAND) pair. */
3831
3832 static class cost_pair *
3833 get_group_iv_cost (struct ivopts_data *data, struct iv_group *group,
3834 struct iv_cand *cand)
3835 {
3836 unsigned i, s;
3837 class cost_pair *ret;
3838
3839 if (!cand)
3840 return NULL;
3841
3842 if (data->consider_all_candidates)
3843 {
3844 ret = group->cost_map + cand->id;
3845 if (!ret->cand)
3846 return NULL;
3847
3848 return ret;
3849 }
3850
3851 /* n_map_members is a power of two, so this computes modulo. */
3852 s = cand->id & (group->n_map_members - 1);
3853 for (i = s; i < group->n_map_members; i++)
3854 if (group->cost_map[i].cand == cand)
3855 return group->cost_map + i;
3856 else if (group->cost_map[i].cand == NULL)
3857 return NULL;
3858 for (i = 0; i < s; i++)
3859 if (group->cost_map[i].cand == cand)
3860 return group->cost_map + i;
3861 else if (group->cost_map[i].cand == NULL)
3862 return NULL;
3863
3864 return NULL;
3865 }
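/* As an illustration of the cost map scheme above (arbitrary numbers): when
   consider_all_candidates is false and a group has 5 related candidates,
   alloc_use_cost_map rounds the map size up to 8.  A candidate with id 13
   then hashes to slot 13 & (8 - 1) = 5 in set_group_iv_cost; if that slot
   is occupied, probing continues at 6, 7 and wraps around to 0.
   get_group_iv_cost walks the same probe sequence and stops at the first
   empty slot.  */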
3866
3867 /* Produce DECL_RTL for object obj so it looks like it is stored in memory. */
3868 static rtx
3869 produce_memory_decl_rtl (tree obj, int *regno)
3870 {
3871 addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (obj));
3872 machine_mode address_mode = targetm.addr_space.address_mode (as);
3873 rtx x;
3874
3875 gcc_assert (obj);
3876 if (TREE_STATIC (obj) || DECL_EXTERNAL (obj))
3877 {
3878 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (obj));
3879 x = gen_rtx_SYMBOL_REF (address_mode, name);
3880 SET_SYMBOL_REF_DECL (x, obj);
3881 x = gen_rtx_MEM (DECL_MODE (obj), x);
3882 set_mem_addr_space (x, as);
3883 targetm.encode_section_info (obj, x, true);
3884 }
3885 else
3886 {
3887 x = gen_raw_REG (address_mode, (*regno)++);
3888 x = gen_rtx_MEM (DECL_MODE (obj), x);
3889 set_mem_addr_space (x, as);
3890 }
3891
3892 return x;
3893 }
3894
3895 /* Prepares decl_rtl for variables referred to in *EXPR_P. Callback for
3896 walk_tree. DATA contains the actual fake register number. */
3897
3898 static tree
3899 prepare_decl_rtl (tree *expr_p, int *ws, void *data)
3900 {
3901 tree obj = NULL_TREE;
3902 rtx x = NULL_RTX;
3903 int *regno = (int *) data;
3904
3905 switch (TREE_CODE (*expr_p))
3906 {
3907 case ADDR_EXPR:
3908 for (expr_p = &TREE_OPERAND (*expr_p, 0);
3909 handled_component_p (*expr_p);
3910 expr_p = &TREE_OPERAND (*expr_p, 0))
3911 continue;
3912 obj = *expr_p;
3913 if (DECL_P (obj) && HAS_RTL_P (obj) && !DECL_RTL_SET_P (obj))
3914 x = produce_memory_decl_rtl (obj, regno);
3915 break;
3916
3917 case SSA_NAME:
3918 *ws = 0;
3919 obj = SSA_NAME_VAR (*expr_p);
3920 /* Defer handling of anonymous SSA_NAMEs to the expander. */
3921 if (!obj)
3922 return NULL_TREE;
3923 if (!DECL_RTL_SET_P (obj))
3924 x = gen_raw_REG (DECL_MODE (obj), (*regno)++);
3925 break;
3926
3927 case VAR_DECL:
3928 case PARM_DECL:
3929 case RESULT_DECL:
3930 *ws = 0;
3931 obj = *expr_p;
3932
3933 if (DECL_RTL_SET_P (obj))
3934 break;
3935
3936 if (DECL_MODE (obj) == BLKmode)
3937 x = produce_memory_decl_rtl (obj, regno);
3938 else
3939 x = gen_raw_REG (DECL_MODE (obj), (*regno)++);
3940
3941 break;
3942
3943 default:
3944 break;
3945 }
3946
3947 if (x)
3948 {
3949 decl_rtl_to_reset.safe_push (obj);
3950 SET_DECL_RTL (obj, x);
3951 }
3952
3953 return NULL_TREE;
3954 }
3955
3956 /* Predict whether the given loop will be transformed in the RTL
3957 doloop_optimize pass. Attempt to duplicate some doloop_optimize checks.
3958 This is only for target independent checks, see targetm.predict_doloop_p
3959 for the target dependent ones.
3960
3961 Note that according to some initial investigation, checks like the costly
3962 niter check and invalid stmt scanning don't bring much gain in general
3963 cases, so keep this as simple as possible for now.
3964
3965 Some RTL-specific checks seem impossible to perform on gimple; if any new
3966 checks or easy checks are missing here, please add them. */
3967
3968 static bool
3969 generic_predict_doloop_p (struct ivopts_data *data)
3970 {
3971 class loop *loop = data->current_loop;
3972
3973 /* Call target hook for target dependent checks. */
3974 if (!targetm.predict_doloop_p (loop))
3975 {
3976 if (dump_file && (dump_flags & TDF_DETAILS))
3977 fprintf (dump_file, "Predict doloop failure due to"
3978 " target specific checks.\n");
3979 return false;
3980 }
3981
3982 /* Similar to doloop_optimize, check the iteration description to know whether
3983 it's suitable or not. Keep it as simple as possible; feel free to extend it
3984 if you find that any multiple-exit cases matter. */
3985 edge exit = single_dom_exit (loop);
3986 class tree_niter_desc *niter_desc;
3987 if (!exit || !(niter_desc = niter_for_exit (data, exit)))
3988 {
3989 if (dump_file && (dump_flags & TDF_DETAILS))
3990 fprintf (dump_file, "Predict doloop failure due to"
3991 " unexpected niters.\n");
3992 return false;
3993 }
3994
3995 /* Similar to doloop_optimize, check whether the iteration count is too small
3996 to be profitable. */
3997 HOST_WIDE_INT est_niter = get_estimated_loop_iterations_int (loop);
3998 if (est_niter == -1)
3999 est_niter = get_likely_max_loop_iterations_int (loop);
4000 if (est_niter >= 0 && est_niter < 3)
4001 {
4002 if (dump_file && (dump_flags & TDF_DETAILS))
4003 fprintf (dump_file,
4004 "Predict doloop failure due to"
4005 " too few iterations (%u).\n",
4006 (unsigned int) est_niter);
4007 return false;
4008 }
4009
4010 return true;
4011 }
4012
4013 /* Determines cost of the computation of EXPR. */
4014
4015 static unsigned
4016 computation_cost (tree expr, bool speed)
4017 {
4018 rtx_insn *seq;
4019 rtx rslt;
4020 tree type = TREE_TYPE (expr);
4021 unsigned cost;
4022 /* Avoid using hard regs in ways which may be unsupported. */
4023 int regno = LAST_VIRTUAL_REGISTER + 1;
4024 struct cgraph_node *node = cgraph_node::get (current_function_decl);
4025 enum node_frequency real_frequency = node->frequency;
4026
4027 node->frequency = NODE_FREQUENCY_NORMAL;
4028 crtl->maybe_hot_insn_p = speed;
4029 walk_tree (&expr, prepare_decl_rtl, &regno, NULL);
4030 start_sequence ();
4031 rslt = expand_expr (expr, NULL_RTX, TYPE_MODE (type), EXPAND_NORMAL);
4032 seq = get_insns ();
4033 end_sequence ();
4034 default_rtl_profile ();
4035 node->frequency = real_frequency;
4036
4037 cost = seq_cost (seq, speed);
4038 if (MEM_P (rslt))
4039 cost += address_cost (XEXP (rslt, 0), TYPE_MODE (type),
4040 TYPE_ADDR_SPACE (type), speed);
4041 else if (!REG_P (rslt))
4042 cost += set_src_cost (rslt, TYPE_MODE (type), speed);
4043
4044 return cost;
4045 }
4046
4047 /* Returns variable containing the value of candidate CAND at statement AT. */
4048
4049 static tree
4050 var_at_stmt (class loop *loop, struct iv_cand *cand, gimple *stmt)
4051 {
4052 if (stmt_after_increment (loop, cand, stmt))
4053 return cand->var_after;
4054 else
4055 return cand->var_before;
4056 }
4057
4058 /* If A is (TYPE) BA and B is (TYPE) BB, and the types of BA and BB have the
4059 same precision that is at least as wide as the precision of TYPE, stores
4060 BA to A and BB to B, and returns the type of BA. Otherwise, returns the
4061 type of A and B. */
4062
4063 static tree
4064 determine_common_wider_type (tree *a, tree *b)
4065 {
4066 tree wider_type = NULL;
4067 tree suba, subb;
4068 tree atype = TREE_TYPE (*a);
4069
4070 if (CONVERT_EXPR_P (*a))
4071 {
4072 suba = TREE_OPERAND (*a, 0);
4073 wider_type = TREE_TYPE (suba);
4074 if (TYPE_PRECISION (wider_type) < TYPE_PRECISION (atype))
4075 return atype;
4076 }
4077 else
4078 return atype;
4079
4080 if (CONVERT_EXPR_P (*b))
4081 {
4082 subb = TREE_OPERAND (*b, 0);
4083 if (TYPE_PRECISION (wider_type) != TYPE_PRECISION (TREE_TYPE (subb)))
4084 return atype;
4085 }
4086 else
4087 return atype;
4088
4089 *a = suba;
4090 *b = subb;
4091 return wider_type;
4092 }
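/* For instance, with 32-bit int and 64-bit long, if *A is (int) x and *B is
   (int) y where both x and y are of type long, the two casts are stripped,
   x and y are stored back into *A and *B, and the 64-bit type is returned,
   so the caller can fold the difference of the two bases in the wider
   type.  */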
4093
4094 /* Determines the expression by which USE is expressed from induction variable
4095 CAND at statement AT in LOOP. The expression is stored in two parts in a
4096 decomposed form: the invariant part is stored in AFF_INV, the variant
4097 part in AFF_VAR. Store the ratio USE.step / CAND.step in PRAT if it is
4098 non-null. Returns false if USE cannot be expressed using CAND. */
4099
4100 static bool
4101 get_computation_aff_1 (class loop *loop, gimple *at, struct iv_use *use,
4102 struct iv_cand *cand, class aff_tree *aff_inv,
4103 class aff_tree *aff_var, widest_int *prat = NULL)
4104 {
4105 tree ubase = use->iv->base, ustep = use->iv->step;
4106 tree cbase = cand->iv->base, cstep = cand->iv->step;
4107 tree common_type, uutype, var, cstep_common;
4108 tree utype = TREE_TYPE (ubase), ctype = TREE_TYPE (cbase);
4109 aff_tree aff_cbase;
4110 widest_int rat;
4111
4112 /* We must have enough precision to express the values of the use. */
4113 if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
4114 return false;
4115
4116 var = var_at_stmt (loop, cand, at);
4117 uutype = unsigned_type_for (utype);
4118
4119 /* If the conversion is not a noop, perform it. */
4120 if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
4121 {
4122 if (cand->orig_iv != NULL && CONVERT_EXPR_P (cbase)
4123 && (CONVERT_EXPR_P (cstep) || poly_int_tree_p (cstep)))
4124 {
4125 tree inner_base, inner_step, inner_type;
4126 inner_base = TREE_OPERAND (cbase, 0);
4127 if (CONVERT_EXPR_P (cstep))
4128 inner_step = TREE_OPERAND (cstep, 0);
4129 else
4130 inner_step = cstep;
4131
4132 inner_type = TREE_TYPE (inner_base);
4133 /* If candidate is added from a biv whose type is smaller than
4134 ctype, we know both candidate and the biv won't overflow.
4135 In this case, it's safe to skip the conversion in the candidate.
4136 As an example, (unsigned short)((unsigned long)A) equals
4137 (unsigned short)A, if A has a type no larger than short. */
4138 if (TYPE_PRECISION (inner_type) <= TYPE_PRECISION (uutype))
4139 {
4140 cbase = inner_base;
4141 cstep = inner_step;
4142 }
4143 }
4144 cbase = fold_convert (uutype, cbase);
4145 cstep = fold_convert (uutype, cstep);
4146 var = fold_convert (uutype, var);
4147 }
4148
4149 /* Ratio is 1 when computing the value of biv cand by itself.
4150 We can't rely on constant_multiple_of in this case because the
4151 use is created after the original biv is selected. The call
4152 could fail because of inconsistent fold behavior. See PR68021
4153 for more information. */
4154 if (cand->pos == IP_ORIGINAL && cand->incremented_at == use->stmt)
4155 {
4156 gcc_assert (is_gimple_assign (use->stmt));
4157 gcc_assert (use->iv->ssa_name == cand->var_after);
4158 gcc_assert (gimple_assign_lhs (use->stmt) == cand->var_after);
4159 rat = 1;
4160 }
4161 else if (!constant_multiple_of (ustep, cstep, &rat))
4162 return false;
4163
4164 if (prat)
4165 *prat = rat;
4166
4167 /* In case both UBASE and CBASE are shortened to UUTYPE from some common
4168 type, we achieve better folding by computing their difference in this
4169 wider type, and casting the result to UUTYPE. We do not need to worry about
4170 overflows, as all the arithmetic will in the end be performed in UUTYPE
4171 anyway. */
4172 common_type = determine_common_wider_type (&ubase, &cbase);
4173
4174 /* use = ubase - ratio * cbase + ratio * var. */
4175 tree_to_aff_combination (ubase, common_type, aff_inv);
4176 tree_to_aff_combination (cbase, common_type, &aff_cbase);
4177 tree_to_aff_combination (var, uutype, aff_var);
4178
4179 /* We need to shift the value if we are after the increment. */
4180 if (stmt_after_increment (loop, cand, at))
4181 {
4182 aff_tree cstep_aff;
4183
4184 if (common_type != uutype)
4185 cstep_common = fold_convert (common_type, cstep);
4186 else
4187 cstep_common = cstep;
4188
4189 tree_to_aff_combination (cstep_common, common_type, &cstep_aff);
4190 aff_combination_add (&aff_cbase, &cstep_aff);
4191 }
4192
4193 aff_combination_scale (&aff_cbase, -rat);
4194 aff_combination_add (aff_inv, &aff_cbase);
4195 if (common_type != uutype)
4196 aff_combination_convert (aff_inv, uutype);
4197
4198 aff_combination_scale (aff_var, rat);
4199 return true;
4200 }
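/* A small worked example of the decomposition above (values made up): for a
   use with IV {base 8, step 8} and a candidate with IV {base 0, step 4} of
   the same type, the ratio is 8 / 4 = 2, so the use is rewritten as
   8 - 2 * 0 + 2 * var, i.e. AFF_INV holds the constant 8 and AFF_VAR holds
   2 * var, where var is the candidate's value at AT.  */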
4201
4202 /* Determines the expression by which USE is expressed from induction variable
4203 CAND at statement AT in LOOP. The expression is stored in a decomposed
4204 form into AFF. Returns false if USE cannot be expressed using CAND. */
4205
4206 static bool
4207 get_computation_aff (class loop *loop, gimple *at, struct iv_use *use,
4208 struct iv_cand *cand, class aff_tree *aff)
4209 {
4210 aff_tree aff_var;
4211
4212 if (!get_computation_aff_1 (loop, at, use, cand, aff, &aff_var))
4213 return false;
4214
4215 aff_combination_add (aff, &aff_var);
4216 return true;
4217 }
4218
4219 /* Return the type of USE. */
4220
4221 static tree
4222 get_use_type (struct iv_use *use)
4223 {
4224 tree base_type = TREE_TYPE (use->iv->base);
4225 tree type;
4226
4227 if (use->type == USE_REF_ADDRESS)
4228 {
4229 /* The base_type may be a void pointer. Create a pointer type based on
4230 the mem_ref instead. */
4231 type = build_pointer_type (TREE_TYPE (*use->op_p));
4232 gcc_assert (TYPE_ADDR_SPACE (TREE_TYPE (type))
4233 == TYPE_ADDR_SPACE (TREE_TYPE (base_type)));
4234 }
4235 else
4236 type = base_type;
4237
4238 return type;
4239 }
4240
4241 /* Determines the expression by which USE is expressed from induction variable
4242 CAND at statement AT in LOOP. The computation is unshared. */
4243
4244 static tree
4245 get_computation_at (class loop *loop, gimple *at,
4246 struct iv_use *use, struct iv_cand *cand)
4247 {
4248 aff_tree aff;
4249 tree type = get_use_type (use);
4250
4251 if (!get_computation_aff (loop, at, use, cand, &aff))
4252 return NULL_TREE;
4253 unshare_aff_combination (&aff);
4254 return fold_convert (type, aff_combination_to_tree (&aff));
4255 }
4256
4257 /* Like get_computation_at, but try harder, even if the computation
4258 is more expensive. Intended for debug stmts. */
4259
4260 static tree
4261 get_debug_computation_at (class loop *loop, gimple *at,
4262 struct iv_use *use, struct iv_cand *cand)
4263 {
4264 if (tree ret = get_computation_at (loop, at, use, cand))
4265 return ret;
4266
4267 tree ubase = use->iv->base, ustep = use->iv->step;
4268 tree cbase = cand->iv->base, cstep = cand->iv->step;
4269 tree var;
4270 tree utype = TREE_TYPE (ubase), ctype = TREE_TYPE (cbase);
4271 widest_int rat;
4272
4273 /* We must have enough precision to express the values of the use. */
4274 if (TYPE_PRECISION (utype) >= TYPE_PRECISION (ctype))
4275 return NULL_TREE;
4276
4277 /* Try to handle the case that get_computation_at doesn't,
4278 try to express
4279 use = ubase + (var - cbase) / ratio. */
4280 if (!constant_multiple_of (cstep, fold_convert (TREE_TYPE (cstep), ustep),
4281 &rat))
4282 return NULL_TREE;
4283
4284 bool neg_p = false;
4285 if (wi::neg_p (rat))
4286 {
4287 if (TYPE_UNSIGNED (ctype))
4288 return NULL_TREE;
4289 neg_p = true;
4290 rat = wi::neg (rat);
4291 }
4292
4293 /* If both IVs can wrap around and CAND doesn't have a power of two step,
4294 it is unsafe. Consider uint16_t CAND with step 9, when wrapping around,
4295 the values will be ... 0xfff0, 0xfff9, 2, 11 ... and when use is say
4296 uint8_t with step 3, those values divided by 3 cast to uint8_t will be
4297 ... 0x50, 0x53, 0, 3 ... rather than expected 0x50, 0x53, 0x56, 0x59. */
4298 if (!use->iv->no_overflow
4299 && !cand->iv->no_overflow
4300 && !integer_pow2p (cstep))
4301 return NULL_TREE;
4302
4303 int bits = wi::exact_log2 (rat);
4304 if (bits == -1)
4305 bits = wi::floor_log2 (rat) + 1;
4306 if (!cand->iv->no_overflow
4307 && TYPE_PRECISION (utype) + bits > TYPE_PRECISION (ctype))
4308 return NULL_TREE;
4309
4310 var = var_at_stmt (loop, cand, at);
4311
4312 if (POINTER_TYPE_P (ctype))
4313 {
4314 ctype = unsigned_type_for (ctype);
4315 cbase = fold_convert (ctype, cbase);
4316 cstep = fold_convert (ctype, cstep);
4317 var = fold_convert (ctype, var);
4318 }
4319
4320 if (stmt_after_increment (loop, cand, at))
4321 var = fold_build2 (MINUS_EXPR, TREE_TYPE (var), var,
4322 unshare_expr (cstep));
4323
4324 var = fold_build2 (MINUS_EXPR, TREE_TYPE (var), var, cbase);
4325 var = fold_build2 (EXACT_DIV_EXPR, TREE_TYPE (var), var,
4326 wide_int_to_tree (TREE_TYPE (var), rat));
4327 if (POINTER_TYPE_P (utype))
4328 {
4329 var = fold_convert (sizetype, var);
4330 if (neg_p)
4331 var = fold_build1 (NEGATE_EXPR, sizetype, var);
4332 var = fold_build2 (POINTER_PLUS_EXPR, utype, ubase, var);
4333 }
4334 else
4335 {
4336 var = fold_convert (utype, var);
4337 var = fold_build2 (neg_p ? MINUS_EXPR : PLUS_EXPR, utype,
4338 ubase, var);
4339 }
4340 return var;
4341 }
4342
4343 /* Adjust the cost COST for being in loop setup rather than loop body.
4344 If we're optimizing for space, the loop setup overhead is constant;
4345 if we're optimizing for speed, amortize it over the per-iteration cost.
4346 If ROUND_UP_P is true, the result is rounded up rather than truncated
4347 toward zero when optimizing for speed. */
4348 static int64_t
4349 adjust_setup_cost (struct ivopts_data *data, int64_t cost,
4350 bool round_up_p = false)
4351 {
4352 if (cost == INFTY)
4353 return cost;
4354 else if (optimize_loop_for_speed_p (data->current_loop))
4355 {
4356 int64_t niters = (int64_t) avg_loop_niter (data->current_loop);
4357 return (cost + (round_up_p ? niters - 1 : 0)) / niters;
4358 }
4359 else
4360 return cost;
4361 }
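/* For example (illustrative numbers only): with an average of 10 iterations
   and a setup cost of 25, optimizing for speed yields 25 / 10 = 2, or
   (25 + 9) / 10 = 3 when ROUND_UP_P is true; when optimizing for size the
   cost stays 25.  */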
4362
4363 /* Calculate the SPEED or size cost of shiftadd EXPR in MODE. MULT is the
4364 EXPR operand holding the shift. COST0 and COST1 are the costs for
4365 calculating the operands of EXPR. Returns true if successful, and returns
4366 the cost in COST. */
4367
4368 static bool
4369 get_shiftadd_cost (tree expr, scalar_int_mode mode, comp_cost cost0,
4370 comp_cost cost1, tree mult, bool speed, comp_cost *cost)
4371 {
4372 comp_cost res;
4373 tree op1 = TREE_OPERAND (expr, 1);
4374 tree cst = TREE_OPERAND (mult, 1);
4375 tree multop = TREE_OPERAND (mult, 0);
4376 int m = exact_log2 (int_cst_value (cst));
4377 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
4378 int as_cost, sa_cost;
4379 bool mult_in_op1;
4380
4381 if (!(m >= 0 && m < maxm))
4382 return false;
4383
4384 STRIP_NOPS (op1);
4385 mult_in_op1 = operand_equal_p (op1, mult, 0);
4386
4387 as_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
4388
4389 /* If the target has a cheap shift-and-add or shift-and-sub instruction,
4390 use that in preference to a shift insn followed by an add insn. */
4391 sa_cost = (TREE_CODE (expr) != MINUS_EXPR
4392 ? shiftadd_cost (speed, mode, m)
4393 : (mult_in_op1
4394 ? shiftsub1_cost (speed, mode, m)
4395 : shiftsub0_cost (speed, mode, m)));
4396
4397 res = comp_cost (MIN (as_cost, sa_cost), 0);
4398 res += (mult_in_op1 ? cost0 : cost1);
4399
4400 STRIP_NOPS (multop);
4401 if (!is_gimple_val (multop))
4402 res += force_expr_to_var_cost (multop, speed);
4403
4404 *cost = res;
4405 return true;
4406 }
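/* For instance, for EXPR = b + a * 8 the multiplier is 8, so m = 3 and the
   choice is between an add plus a shift by 3 and a single shift-add
   instruction, whichever the target reports as cheaper; the cost of the
   other operand (here b) is added on top, and the cost of forcing a into a
   variable is added if a is not already a gimple value.  */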
4407
4408 /* Estimates cost of forcing expression EXPR into a variable. */
4409
4410 static comp_cost
4411 force_expr_to_var_cost (tree expr, bool speed)
4412 {
4413 static bool costs_initialized = false;
4414 static unsigned integer_cost [2];
4415 static unsigned symbol_cost [2];
4416 static unsigned address_cost [2];
4417 tree op0, op1;
4418 comp_cost cost0, cost1, cost;
4419 machine_mode mode;
4420 scalar_int_mode int_mode;
4421
4422 if (!costs_initialized)
4423 {
4424 tree type = build_pointer_type (integer_type_node);
4425 tree var, addr;
4426 rtx x;
4427 int i;
4428
4429 var = create_tmp_var_raw (integer_type_node, "test_var");
4430 TREE_STATIC (var) = 1;
4431 x = produce_memory_decl_rtl (var, NULL);
4432 SET_DECL_RTL (var, x);
4433
4434 addr = build1 (ADDR_EXPR, type, var);
4435
4436
4437 for (i = 0; i < 2; i++)
4438 {
4439 integer_cost[i] = computation_cost (build_int_cst (integer_type_node,
4440 2000), i);
4441
4442 symbol_cost[i] = computation_cost (addr, i) + 1;
4443
4444 address_cost[i]
4445 = computation_cost (fold_build_pointer_plus_hwi (addr, 2000), i) + 1;
4446 if (dump_file && (dump_flags & TDF_DETAILS))
4447 {
4448 fprintf (dump_file, "force_expr_to_var_cost %s costs:\n", i ? "speed" : "size");
4449 fprintf (dump_file, " integer %d\n", (int) integer_cost[i]);
4450 fprintf (dump_file, " symbol %d\n", (int) symbol_cost[i]);
4451 fprintf (dump_file, " address %d\n", (int) address_cost[i]);
4452 fprintf (dump_file, " other %d\n", (int) target_spill_cost[i]);
4453 fprintf (dump_file, "\n");
4454 }
4455 }
4456
4457 costs_initialized = true;
4458 }
4459
4460 STRIP_NOPS (expr);
4461
4462 if (SSA_VAR_P (expr))
4463 return no_cost;
4464
4465 if (is_gimple_min_invariant (expr))
4466 {
4467 if (poly_int_tree_p (expr))
4468 return comp_cost (integer_cost [speed], 0);
4469
4470 if (TREE_CODE (expr) == ADDR_EXPR)
4471 {
4472 tree obj = TREE_OPERAND (expr, 0);
4473
4474 if (VAR_P (obj)
4475 || TREE_CODE (obj) == PARM_DECL
4476 || TREE_CODE (obj) == RESULT_DECL)
4477 return comp_cost (symbol_cost [speed], 0);
4478 }
4479
4480 return comp_cost (address_cost [speed], 0);
4481 }
4482
4483 switch (TREE_CODE (expr))
4484 {
4485 case POINTER_PLUS_EXPR:
4486 case PLUS_EXPR:
4487 case MINUS_EXPR:
4488 case MULT_EXPR:
4489 case TRUNC_DIV_EXPR:
4490 case BIT_AND_EXPR:
4491 case BIT_IOR_EXPR:
4492 case LSHIFT_EXPR:
4493 case RSHIFT_EXPR:
4494 op0 = TREE_OPERAND (expr, 0);
4495 op1 = TREE_OPERAND (expr, 1);
4496 STRIP_NOPS (op0);
4497 STRIP_NOPS (op1);
4498 break;
4499
4500 CASE_CONVERT:
4501 case NEGATE_EXPR:
4502 case BIT_NOT_EXPR:
4503 op0 = TREE_OPERAND (expr, 0);
4504 STRIP_NOPS (op0);
4505 op1 = NULL_TREE;
4506 break;
4507 /* See add_iv_candidate_for_doloop; for the doloop may_be_zero case, we
4508 introduce a COND_EXPR for the IV base, so we need to support better cost
4509 estimation for this COND_EXPR and tcc_comparison. */
4510 case COND_EXPR:
4511 op0 = TREE_OPERAND (expr, 1);
4512 STRIP_NOPS (op0);
4513 op1 = TREE_OPERAND (expr, 2);
4514 STRIP_NOPS (op1);
4515 break;
4516 case LT_EXPR:
4517 case LE_EXPR:
4518 case GT_EXPR:
4519 case GE_EXPR:
4520 case EQ_EXPR:
4521 case NE_EXPR:
4522 case UNORDERED_EXPR:
4523 case ORDERED_EXPR:
4524 case UNLT_EXPR:
4525 case UNLE_EXPR:
4526 case UNGT_EXPR:
4527 case UNGE_EXPR:
4528 case UNEQ_EXPR:
4529 case LTGT_EXPR:
4530 case MAX_EXPR:
4531 case MIN_EXPR:
4532 op0 = TREE_OPERAND (expr, 0);
4533 STRIP_NOPS (op0);
4534 op1 = TREE_OPERAND (expr, 1);
4535 STRIP_NOPS (op1);
4536 break;
4537
4538 default:
4539 /* Just an arbitrary value, FIXME. */
4540 return comp_cost (target_spill_cost[speed], 0);
4541 }
4542
4543 if (op0 == NULL_TREE
4544 || TREE_CODE (op0) == SSA_NAME || CONSTANT_CLASS_P (op0))
4545 cost0 = no_cost;
4546 else
4547 cost0 = force_expr_to_var_cost (op0, speed);
4548
4549 if (op1 == NULL_TREE
4550 || TREE_CODE (op1) == SSA_NAME || CONSTANT_CLASS_P (op1))
4551 cost1 = no_cost;
4552 else
4553 cost1 = force_expr_to_var_cost (op1, speed);
4554
4555 mode = TYPE_MODE (TREE_TYPE (expr));
4556 switch (TREE_CODE (expr))
4557 {
4558 case POINTER_PLUS_EXPR:
4559 case PLUS_EXPR:
4560 case MINUS_EXPR:
4561 case NEGATE_EXPR:
4562 cost = comp_cost (add_cost (speed, mode), 0);
4563 if (TREE_CODE (expr) != NEGATE_EXPR)
4564 {
4565 tree mult = NULL_TREE;
4566 comp_cost sa_cost;
4567 if (TREE_CODE (op1) == MULT_EXPR)
4568 mult = op1;
4569 else if (TREE_CODE (op0) == MULT_EXPR)
4570 mult = op0;
4571
4572 if (mult != NULL_TREE
4573 && is_a <scalar_int_mode> (mode, &int_mode)
4574 && cst_and_fits_in_hwi (TREE_OPERAND (mult, 1))
4575 && get_shiftadd_cost (expr, int_mode, cost0, cost1, mult,
4576 speed, &sa_cost))
4577 return sa_cost;
4578 }
4579 break;
4580
4581 CASE_CONVERT:
4582 {
4583 tree inner_mode, outer_mode;
4584 outer_mode = TREE_TYPE (expr);
4585 inner_mode = TREE_TYPE (op0);
4586 cost = comp_cost (convert_cost (TYPE_MODE (outer_mode),
4587 TYPE_MODE (inner_mode), speed), 0);
4588 }
4589 break;
4590
4591 case MULT_EXPR:
4592 if (cst_and_fits_in_hwi (op0))
4593 cost = comp_cost (mult_by_coeff_cost (int_cst_value (op0),
4594 mode, speed), 0);
4595 else if (cst_and_fits_in_hwi (op1))
4596 cost = comp_cost (mult_by_coeff_cost (int_cst_value (op1),
4597 mode, speed), 0);
4598 else
4599 return comp_cost (target_spill_cost [speed], 0);
4600 break;
4601
4602 case TRUNC_DIV_EXPR:
4603 /* Division by power of two is usually cheap, so we allow it. Forbid
4604 anything else. */
4605 if (integer_pow2p (TREE_OPERAND (expr, 1)))
4606 cost = comp_cost (add_cost (speed, mode), 0);
4607 else
4608 cost = comp_cost (target_spill_cost[speed], 0);
4609 break;
4610
4611 case BIT_AND_EXPR:
4612 case BIT_IOR_EXPR:
4613 case BIT_NOT_EXPR:
4614 case LSHIFT_EXPR:
4615 case RSHIFT_EXPR:
4616 cost = comp_cost (add_cost (speed, mode), 0);
4617 break;
4618 case COND_EXPR:
4619 op0 = TREE_OPERAND (expr, 0);
4620 STRIP_NOPS (op0);
4621 if (op0 == NULL_TREE || TREE_CODE (op0) == SSA_NAME
4622 || CONSTANT_CLASS_P (op0))
4623 cost = no_cost;
4624 else
4625 cost = force_expr_to_var_cost (op0, speed);
4626 break;
4627 case LT_EXPR:
4628 case LE_EXPR:
4629 case GT_EXPR:
4630 case GE_EXPR:
4631 case EQ_EXPR:
4632 case NE_EXPR:
4633 case UNORDERED_EXPR:
4634 case ORDERED_EXPR:
4635 case UNLT_EXPR:
4636 case UNLE_EXPR:
4637 case UNGT_EXPR:
4638 case UNGE_EXPR:
4639 case UNEQ_EXPR:
4640 case LTGT_EXPR:
4641 case MAX_EXPR:
4642 case MIN_EXPR:
4643 /* Simply use the add cost for now; FIXME if there is some more accurate
4644 way to evaluate the cost. */
4645 cost = comp_cost (add_cost (speed, mode), 0);
4646 break;
4647
4648 default:
4649 gcc_unreachable ();
4650 }
4651
4652 cost += cost0;
4653 cost += cost1;
4654 return cost;
4655 }
4656
4657 /* Estimates cost of forcing EXPR into a variable. INV_VARS is a set of the
4658 invariants the computation depends on. */
4659
4660 static comp_cost
4661 force_var_cost (struct ivopts_data *data, tree expr, bitmap *inv_vars)
4662 {
4663 if (!expr)
4664 return no_cost;
4665
4666 find_inv_vars (data, &expr, inv_vars);
4667 return force_expr_to_var_cost (expr, data->speed);
4668 }
4669
4670 /* Returns the cost of an auto-modifying address expression of the form
4671 base + offset. AINC_STEP is the step size of the address IV. AINC_OFFSET
4672 is the offset of the address expression. The address expression has
4673 ADDR_MODE in addr space AS. The memory access has MEM_MODE. SPEED
4674 indicates whether we are optimizing for speed or size. */
4675
4676 enum ainc_type
4677 {
4678 AINC_PRE_INC, /* Pre increment. */
4679 AINC_PRE_DEC, /* Pre decrement. */
4680 AINC_POST_INC, /* Post increment. */
4681 AINC_POST_DEC, /* Post decrement. */
4682 AINC_NONE /* Also the number of auto increment types. */
4683 };
4684
4685 struct ainc_cost_data
4686 {
4687 int64_t costs[AINC_NONE];
4688 };
4689
4690 static comp_cost
4691 get_address_cost_ainc (poly_int64 ainc_step, poly_int64 ainc_offset,
4692 machine_mode addr_mode, machine_mode mem_mode,
4693 addr_space_t as, bool speed)
4694 {
4695 if (!USE_LOAD_PRE_DECREMENT (mem_mode)
4696 && !USE_STORE_PRE_DECREMENT (mem_mode)
4697 && !USE_LOAD_POST_DECREMENT (mem_mode)
4698 && !USE_STORE_POST_DECREMENT (mem_mode)
4699 && !USE_LOAD_PRE_INCREMENT (mem_mode)
4700 && !USE_STORE_PRE_INCREMENT (mem_mode)
4701 && !USE_LOAD_POST_INCREMENT (mem_mode)
4702 && !USE_STORE_POST_INCREMENT (mem_mode))
4703 return infinite_cost;
4704
4705 static vec<ainc_cost_data *> ainc_cost_data_list;
4706 unsigned idx = (unsigned) as * MAX_MACHINE_MODE + (unsigned) mem_mode;
4707 if (idx >= ainc_cost_data_list.length ())
4708 {
4709 unsigned nsize = ((unsigned) as + 1) * MAX_MACHINE_MODE;
4710
4711 gcc_assert (nsize > idx);
4712 ainc_cost_data_list.safe_grow_cleared (nsize, true);
4713 }
4714
4715 ainc_cost_data *data = ainc_cost_data_list[idx];
4716 if (data == NULL)
4717 {
4718 rtx reg = gen_raw_REG (addr_mode, LAST_VIRTUAL_REGISTER + 1);
4719
4720 data = (ainc_cost_data *) xcalloc (1, sizeof (*data));
4721 data->costs[AINC_PRE_DEC] = INFTY;
4722 data->costs[AINC_POST_DEC] = INFTY;
4723 data->costs[AINC_PRE_INC] = INFTY;
4724 data->costs[AINC_POST_INC] = INFTY;
4725 if (USE_LOAD_PRE_DECREMENT (mem_mode)
4726 || USE_STORE_PRE_DECREMENT (mem_mode))
4727 {
4728 rtx addr = gen_rtx_PRE_DEC (addr_mode, reg);
4729
4730 if (memory_address_addr_space_p (mem_mode, addr, as))
4731 data->costs[AINC_PRE_DEC]
4732 = address_cost (addr, mem_mode, as, speed);
4733 }
4734 if (USE_LOAD_POST_DECREMENT (mem_mode)
4735 || USE_STORE_POST_DECREMENT (mem_mode))
4736 {
4737 rtx addr = gen_rtx_POST_DEC (addr_mode, reg);
4738
4739 if (memory_address_addr_space_p (mem_mode, addr, as))
4740 data->costs[AINC_POST_DEC]
4741 = address_cost (addr, mem_mode, as, speed);
4742 }
4743 if (USE_LOAD_PRE_INCREMENT (mem_mode)
4744 || USE_STORE_PRE_INCREMENT (mem_mode))
4745 {
4746 rtx addr = gen_rtx_PRE_INC (addr_mode, reg);
4747
4748 if (memory_address_addr_space_p (mem_mode, addr, as))
4749 data->costs[AINC_PRE_INC]
4750 = address_cost (addr, mem_mode, as, speed);
4751 }
4752 if (USE_LOAD_POST_INCREMENT (mem_mode)
4753 || USE_STORE_POST_INCREMENT (mem_mode))
4754 {
4755 rtx addr = gen_rtx_POST_INC (addr_mode, reg);
4756
4757 if (memory_address_addr_space_p (mem_mode, addr, as))
4758 data->costs[AINC_POST_INC]
4759 = address_cost (addr, mem_mode, as, speed);
4760 }
4761 ainc_cost_data_list[idx] = data;
4762 }
4763
4764 poly_int64 msize = GET_MODE_SIZE (mem_mode);
4765 if (known_eq (ainc_offset, 0) && known_eq (msize, ainc_step))
4766 return comp_cost (data->costs[AINC_POST_INC], 0);
4767 if (known_eq (ainc_offset, 0) && known_eq (msize, -ainc_step))
4768 return comp_cost (data->costs[AINC_POST_DEC], 0);
4769 if (known_eq (ainc_offset, msize) && known_eq (msize, ainc_step))
4770 return comp_cost (data->costs[AINC_PRE_INC], 0);
4771 if (known_eq (ainc_offset, -msize) && known_eq (msize, -ainc_step))
4772 return comp_cost (data->costs[AINC_PRE_DEC], 0);
4773
4774 return infinite_cost;
4775 }
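/* To illustrate the mapping at the end of the function: for a 4-byte
   MEM_MODE, a step of +4 with offset 0 matches post-increment, a step of +4
   with offset +4 matches pre-increment, a step of -4 with offset 0 matches
   post-decrement, and a step of -4 with offset -4 matches pre-decrement;
   anything else gets infinite cost.  */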
4776
4777 /* Return cost of computing USE's address expression by using CAND.
4778 AFF_INV and AFF_VAR represent invariant and variant parts of the
4779 address expression, respectively. If AFF_INV is simple, store
4780 the loop invariant variables on which it depends in INV_VARS;
4781 if AFF_INV is complicated, handle it as a new invariant expression
4782 and record it in INV_EXPR. RATIO is the ratio between the steps of
4783 USE and CAND. If CAN_AUTOINC is non-NULL, store in it a boolean
4784 value indicating whether this is an auto-increment address. */
4785
4786 static comp_cost
4787 get_address_cost (struct ivopts_data *data, struct iv_use *use,
4788 struct iv_cand *cand, aff_tree *aff_inv,
4789 aff_tree *aff_var, HOST_WIDE_INT ratio,
4790 bitmap *inv_vars, iv_inv_expr_ent **inv_expr,
4791 bool *can_autoinc, bool speed)
4792 {
4793 rtx addr;
4794 bool simple_inv = true;
4795 tree comp_inv = NULL_TREE, type = aff_var->type;
4796 comp_cost var_cost = no_cost, cost = no_cost;
4797 struct mem_address parts = {NULL_TREE, integer_one_node,
4798 NULL_TREE, NULL_TREE, NULL_TREE};
4799 machine_mode addr_mode = TYPE_MODE (type);
4800 machine_mode mem_mode = TYPE_MODE (use->mem_type);
4801 addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (use->iv->base));
4802 /* Only true if ratio != 1. */
4803 bool ok_with_ratio_p = false;
4804 bool ok_without_ratio_p = false;
4805
4806 if (!aff_combination_const_p (aff_inv))
4807 {
4808 parts.index = integer_one_node;
4809 /* Addressing mode "base + index". */
4810 ok_without_ratio_p = valid_mem_ref_p (mem_mode, as, &parts);
4811 if (ratio != 1)
4812 {
4813 parts.step = wide_int_to_tree (type, ratio);
4814 /* Addressing mode "base + index << scale". */
4815 ok_with_ratio_p = valid_mem_ref_p (mem_mode, as, &parts);
4816 if (!ok_with_ratio_p)
4817 parts.step = NULL_TREE;
4818 }
4819 if (ok_with_ratio_p || ok_without_ratio_p)
4820 {
4821 if (maybe_ne (aff_inv->offset, 0))
4822 {
4823 parts.offset = wide_int_to_tree (sizetype, aff_inv->offset);
4824 /* Addressing mode "base + index [<< scale] + offset". */
4825 if (!valid_mem_ref_p (mem_mode, as, &parts))
4826 parts.offset = NULL_TREE;
4827 else
4828 aff_inv->offset = 0;
4829 }
4830
4831 move_fixed_address_to_symbol (&parts, aff_inv);
4832 /* Base is fixed address and is moved to symbol part. */
4833 if (parts.symbol != NULL_TREE && aff_combination_zero_p (aff_inv))
4834 parts.base = NULL_TREE;
4835
4836 /* Addressing mode "symbol + base + index [<< scale] [+ offset]". */
4837 if (parts.symbol != NULL_TREE
4838 && !valid_mem_ref_p (mem_mode, as, &parts))
4839 {
4840 aff_combination_add_elt (aff_inv, parts.symbol, 1);
4841 parts.symbol = NULL_TREE;
4842 /* Reset SIMPLE_INV since symbol address needs to be computed
4843 outside of address expression in this case. */
4844 simple_inv = false;
4845 /* Symbol part is moved back to base part, it can't be NULL. */
4846 parts.base = integer_one_node;
4847 }
4848 }
4849 else
4850 parts.index = NULL_TREE;
4851 }
4852 else
4853 {
4854 poly_int64 ainc_step;
4855 if (can_autoinc
4856 && ratio == 1
4857 && ptrdiff_tree_p (cand->iv->step, &ainc_step))
4858 {
4859 poly_int64 ainc_offset = (aff_inv->offset).force_shwi ();
4860
4861 if (stmt_after_increment (data->current_loop, cand, use->stmt))
4862 ainc_offset += ainc_step;
4863 cost = get_address_cost_ainc (ainc_step, ainc_offset,
4864 addr_mode, mem_mode, as, speed);
4865 if (!cost.infinite_cost_p ())
4866 {
4867 *can_autoinc = true;
4868 return cost;
4869 }
4870 cost = no_cost;
4871 }
4872 if (!aff_combination_zero_p (aff_inv))
4873 {
4874 parts.offset = wide_int_to_tree (sizetype, aff_inv->offset);
4875 /* Addressing mode "base + offset". */
4876 if (!valid_mem_ref_p (mem_mode, as, &parts))
4877 parts.offset = NULL_TREE;
4878 else
4879 aff_inv->offset = 0;
4880 }
4881 }
4882
4883 if (simple_inv)
4884 simple_inv = (aff_inv == NULL
4885 || aff_combination_const_p (aff_inv)
4886 || aff_combination_singleton_var_p (aff_inv));
4887 if (!aff_combination_zero_p (aff_inv))
4888 comp_inv = aff_combination_to_tree (aff_inv);
4889 if (comp_inv != NULL_TREE)
4890 cost = force_var_cost (data, comp_inv, inv_vars);
4891 if (ratio != 1 && parts.step == NULL_TREE)
4892 var_cost += mult_by_coeff_cost (ratio, addr_mode, speed);
4893 if (comp_inv != NULL_TREE && parts.index == NULL_TREE)
4894 var_cost += add_cost (speed, addr_mode);
4895
4896 if (comp_inv && inv_expr && !simple_inv)
4897 {
4898 *inv_expr = get_loop_invariant_expr (data, comp_inv);
4899 /* Clear depends on. */
4900 if (*inv_expr != NULL && inv_vars && *inv_vars)
4901 bitmap_clear (*inv_vars);
4902
4903 /* The cost of a small invariant expression adjusted against loop niters
4904 is usually zero, which makes it difficult to differentiate from a
4905 candidate based on loop invariant variables. Secondly, the generated
4906 invariant expression may not be hoisted out of the loop by a following
4907 pass. We penalize the cost by rounding up in order to neutralize such
4908 effects. */
4909 cost.cost = adjust_setup_cost (data, cost.cost, true);
4910 cost.scratch = cost.cost;
4911 }
4912
4913 cost += var_cost;
4914 addr = addr_for_mem_ref (&parts, as, false);
4915 gcc_assert (memory_address_addr_space_p (mem_mode, addr, as));
4916 cost += address_cost (addr, mem_mode, as, speed);
4917
4918 if (parts.symbol != NULL_TREE)
4919 cost.complexity += 1;
4920 /* Don't increase the complexity of adding a scaled index if it's
4921 the only kind of index that the target allows. */
4922 if (parts.step != NULL_TREE && ok_without_ratio_p)
4923 cost.complexity += 1;
4924 if (parts.base != NULL_TREE && parts.index != NULL_TREE)
4925 cost.complexity += 1;
4926 if (parts.offset != NULL_TREE && !integer_zerop (parts.offset))
4927 cost.complexity += 1;
4928
4929 return cost;
4930 }
4931
4932 /* Scale (multiply) the computed COST (except the scratch part, which should
4933 be hoisted out of the loop) by header->frequency / AT->frequency, which
4934 makes the expected cost more accurate. */
4935
4936 static comp_cost
4937 get_scaled_computation_cost_at (ivopts_data *data, gimple *at, comp_cost cost)
4938 {
4939 if (data->speed
4940 && data->current_loop->header->count.to_frequency (cfun) > 0)
4941 {
4942 basic_block bb = gimple_bb (at);
4943 gcc_assert (cost.scratch <= cost.cost);
4944 int scale_factor = (int)(intptr_t) bb->aux;
4945 if (scale_factor == 1)
4946 return cost;
4947
4948 int64_t scaled_cost
4949 = cost.scratch + (cost.cost - cost.scratch) * scale_factor;
4950
4951 if (dump_file && (dump_flags & TDF_DETAILS))
4952 fprintf (dump_file, "Scaling cost based on bb prob by %2.2f: "
4953 "%" PRId64 " (scratch: %" PRId64 ") -> %" PRId64 "\n",
4954 1.0f * scale_factor, cost.cost, cost.scratch, scaled_cost);
4955
4956 cost.cost = scaled_cost;
4957 }
4958
4959 return cost;
4960 }
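/* For example (made-up numbers): with cost 10 of which 4 is scratch (setup)
   and a basic block scale factor of 3, the scaled cost is
   4 + (10 - 4) * 3 = 22; the scratch part is left unscaled because it is
   expected to be hoisted out of the loop.  */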
4961
4962 /* Determines the cost of the computation by which USE is expressed
4963 from induction variable CAND. If ADDRESS_P is true, we just need
4964 to create an address from it, otherwise we want to get it into a
4965 register. A set of invariants we depend on is stored in INV_VARS.
4966 If CAN_AUTOINC is nonnull, use it to record whether autoinc
4967 addressing is likely. If INV_EXPR is nonnull, record invariant
4968 expr entry in it. */
4969
4970 static comp_cost
4971 get_computation_cost (struct ivopts_data *data, struct iv_use *use,
4972 struct iv_cand *cand, bool address_p, bitmap *inv_vars,
4973 bool *can_autoinc, iv_inv_expr_ent **inv_expr)
4974 {
4975 gimple *at = use->stmt;
4976 tree ubase = use->iv->base, cbase = cand->iv->base;
4977 tree utype = TREE_TYPE (ubase), ctype = TREE_TYPE (cbase);
4978 tree comp_inv = NULL_TREE;
4979 HOST_WIDE_INT ratio, aratio;
4980 comp_cost cost;
4981 widest_int rat;
4982 aff_tree aff_inv, aff_var;
4983 bool speed = optimize_bb_for_speed_p (gimple_bb (at));
4984
4985 if (inv_vars)
4986 *inv_vars = NULL;
4987 if (can_autoinc)
4988 *can_autoinc = false;
4989 if (inv_expr)
4990 *inv_expr = NULL;
4991
4992 /* Check if we have enough precision to express the values of use. */
4993 if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
4994 return infinite_cost;
4995
4996 if (address_p
4997 || (use->iv->base_object
4998 && cand->iv->base_object
4999 && POINTER_TYPE_P (TREE_TYPE (use->iv->base_object))
5000 && POINTER_TYPE_P (TREE_TYPE (cand->iv->base_object))))
5001 {
5002 /* Do not try to express address of an object with computation based
5003 on address of a different object. This may cause problems in rtl
5004 level alias analysis (that does not expect this to be happening,
5005 as this is illegal in C), and would be unlikely to be useful
5006 anyway. */
5007 if (use->iv->base_object
5008 && cand->iv->base_object
5009 && !operand_equal_p (use->iv->base_object, cand->iv->base_object, 0))
5010 return infinite_cost;
5011 }
5012
5013 if (!get_computation_aff_1 (data->current_loop, at, use,
5014 cand, &aff_inv, &aff_var, &rat)
5015 || !wi::fits_shwi_p (rat))
5016 return infinite_cost;
5017
5018 ratio = rat.to_shwi ();
5019 if (address_p)
5020 {
5021 cost = get_address_cost (data, use, cand, &aff_inv, &aff_var, ratio,
5022 inv_vars, inv_expr, can_autoinc, speed);
5023 cost = get_scaled_computation_cost_at (data, at, cost);
5024 /* For doloop IV cand, add on the extra cost. */
5025 cost += cand->doloop_p ? targetm.doloop_cost_for_address : 0;
5026 return cost;
5027 }
5028
5029 bool simple_inv = (aff_combination_const_p (&aff_inv)
5030 || aff_combination_singleton_var_p (&aff_inv));
5031 tree signed_type = signed_type_for (aff_combination_type (&aff_inv));
5032 aff_combination_convert (&aff_inv, signed_type);
5033 if (!aff_combination_zero_p (&aff_inv))
5034 comp_inv = aff_combination_to_tree (&aff_inv);
5035
5036 cost = force_var_cost (data, comp_inv, inv_vars);
5037 if (comp_inv && inv_expr && !simple_inv)
5038 {
5039 *inv_expr = get_loop_invariant_expr (data, comp_inv);
5040 /* Clear depends on. */
5041 if (*inv_expr != NULL && inv_vars && *inv_vars)
5042 bitmap_clear (*inv_vars);
5043
5044 cost.cost = adjust_setup_cost (data, cost.cost);
5045 /* Record setup cost in scratch field. */
5046 cost.scratch = cost.cost;
5047 }
5048 /* The cost of a constant integer can be covered when adding the invariant
5049 part to the variant part. */
5050 else if (comp_inv && CONSTANT_CLASS_P (comp_inv))
5051 cost = no_cost;
5052
5053 /* Need type narrowing to represent use with cand. */
5054 if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
5055 {
5056 machine_mode outer_mode = TYPE_MODE (utype);
5057 machine_mode inner_mode = TYPE_MODE (ctype);
5058 cost += comp_cost (convert_cost (outer_mode, inner_mode, speed), 0);
5059 }
5060
5061 /* Turn a + i * (-c) into a - i * c. */
5062 if (ratio < 0 && comp_inv && !integer_zerop (comp_inv))
5063 aratio = -ratio;
5064 else
5065 aratio = ratio;
5066
5067 if (ratio != 1)
5068 cost += mult_by_coeff_cost (aratio, TYPE_MODE (utype), speed);
5069
5070 /* TODO: We may also need to check if we can compute a + i * 4 in one
5071 instruction. */
5072 /* Need to add up the invariant and variant parts. */
5073 if (comp_inv && !integer_zerop (comp_inv))
5074 cost += add_cost (speed, TYPE_MODE (utype));
5075
5076 cost = get_scaled_computation_cost_at (data, at, cost);
5077
5078 /* For doloop IV cand, add on the extra cost. */
5079 if (cand->doloop_p && use->type == USE_NONLINEAR_EXPR)
5080 cost += targetm.doloop_cost_for_generic;
5081
5082 return cost;
5083 }
5084
5085 /* Determines cost of computing the use in GROUP with CAND in a generic
5086 expression. */
5087
5088 static bool
5089 determine_group_iv_cost_generic (struct ivopts_data *data,
5090 struct iv_group *group, struct iv_cand *cand)
5091 {
5092 comp_cost cost;
5093 iv_inv_expr_ent *inv_expr = NULL;
5094 bitmap inv_vars = NULL, inv_exprs = NULL;
5095 struct iv_use *use = group->vuses[0];
5096
5097 /* The simple case first -- if we need to express value of the preserved
5098 original biv, the cost is 0. This also prevents us from counting the
5099 cost of increment twice -- once at this use and once in the cost of
5100 the candidate. */
5101 if (cand->pos == IP_ORIGINAL && cand->incremented_at == use->stmt)
5102 cost = no_cost;
5103 /* If the IV candidate involves undefined SSA values and is not the
5104 same IV as on the USE, avoid using that candidate here. */
5105 else if (cand->involves_undefs
5106 && (!use->iv || !operand_equal_p (cand->iv->base, use->iv->base, 0)))
5107 return false;
5108 else
5109 cost = get_computation_cost (data, use, cand, false,
5110 &inv_vars, NULL, &inv_expr);
5111
5112 if (inv_expr)
5113 {
5114 inv_exprs = BITMAP_ALLOC (NULL);
5115 bitmap_set_bit (inv_exprs, inv_expr->id);
5116 }
5117 set_group_iv_cost (data, group, cand, cost, inv_vars,
5118 NULL_TREE, ERROR_MARK, inv_exprs);
5119 return !cost.infinite_cost_p ();
5120 }
5121
5122 /* Determines cost of computing uses in GROUP with CAND in addresses. */
5123
5124 static bool
5125 determine_group_iv_cost_address (struct ivopts_data *data,
5126 struct iv_group *group, struct iv_cand *cand)
5127 {
5128 unsigned i;
5129 bitmap inv_vars = NULL, inv_exprs = NULL;
5130 bool can_autoinc;
5131 iv_inv_expr_ent *inv_expr = NULL;
5132 struct iv_use *use = group->vuses[0];
5133 comp_cost sum_cost = no_cost, cost;
5134
5135 cost = get_computation_cost (data, use, cand, true,
5136 &inv_vars, &can_autoinc, &inv_expr);
5137
5138 if (inv_expr)
5139 {
5140 inv_exprs = BITMAP_ALLOC (NULL);
5141 bitmap_set_bit (inv_exprs, inv_expr->id);
5142 }
5143 sum_cost = cost;
5144 if (!sum_cost.infinite_cost_p () && cand->ainc_use == use)
5145 {
5146 if (can_autoinc)
5147 sum_cost -= cand->cost_step;
5148 /* If we generated the candidate solely for exploiting autoincrement
5149 opportunities, and it turns out it can't be used, set the cost to
5150 infinity to make sure we ignore it. */
5151 else if (cand->pos == IP_AFTER_USE || cand->pos == IP_BEFORE_USE)
5152 sum_cost = infinite_cost;
5153 }
5154
5155 /* Uses in a group can share setup code, so only add setup cost once. */
5156 cost -= cost.scratch;
5157 /* Compute and add costs for the remaining uses of this group. */
5158 for (i = 1; i < group->vuses.length () && !sum_cost.infinite_cost_p (); i++)
5159 {
5160 struct iv_use *next = group->vuses[i];
5161
5162 /* TODO: We could skip computing cost for sub iv_use when it has the
5163 same cost as the first iv_use, but the cost really depends on the
5164 offset and where the iv_use is. */
5165 cost = get_computation_cost (data, next, cand, true,
5166 NULL, &can_autoinc, &inv_expr);
5167 if (inv_expr)
5168 {
5169 if (!inv_exprs)
5170 inv_exprs = BITMAP_ALLOC (NULL);
5171
5172 bitmap_set_bit (inv_exprs, inv_expr->id);
5173 }
5174 sum_cost += cost;
5175 }
5176 set_group_iv_cost (data, group, cand, sum_cost, inv_vars,
5177 NULL_TREE, ERROR_MARK, inv_exprs);
5178
5179 return !sum_cost.infinite_cost_p ();
5180 }
5181
5182 /* Computes value of candidate CAND at position AT in iteration DESC->NITER,
5183 and stores it to VAL. */
5184
5185 static void
5186 cand_value_at (class loop *loop, struct iv_cand *cand, gimple *at,
5187 class tree_niter_desc *desc, aff_tree *val)
5188 {
5189 aff_tree step, delta, nit;
5190 struct iv *iv = cand->iv;
5191 tree type = TREE_TYPE (iv->base);
5192 tree niter = desc->niter;
5193 bool after_adjust = stmt_after_increment (loop, cand, at);
5194 tree steptype;
5195
5196 if (POINTER_TYPE_P (type))
5197 steptype = sizetype;
5198 else
5199 steptype = unsigned_type_for (type);
5200
5201 /* If AFTER_ADJUST is required, the code below generates the equivalent
5202 of BASE + NITER * STEP + STEP, when ideally we'd prefer the expression
5203 BASE + (NITER + 1) * STEP, especially when NITER is often of the form
5204 SSA_NAME - 1. Unfortunately, guaranteeing that adding 1 to NITER
5205 doesn't overflow is tricky, so we peek inside the TREE_NITER_DESC
5206 class for common idioms that we know are safe. */
5207 if (after_adjust
5208 && desc->control.no_overflow
5209 && integer_onep (desc->control.step)
5210 && (desc->cmp == LT_EXPR
5211 || desc->cmp == NE_EXPR)
5212 && TREE_CODE (desc->bound) == SSA_NAME)
5213 {
5214 if (integer_onep (desc->control.base))
5215 {
5216 niter = desc->bound;
5217 after_adjust = false;
5218 }
5219 else if (TREE_CODE (niter) == MINUS_EXPR
5220 && integer_onep (TREE_OPERAND (niter, 1)))
5221 {
5222 niter = TREE_OPERAND (niter, 0);
5223 after_adjust = false;
5224 }
5225 }
5226
5227 tree_to_aff_combination (iv->step, TREE_TYPE (iv->step), &step);
5228 aff_combination_convert (&step, steptype);
5229 tree_to_aff_combination (niter, TREE_TYPE (niter), &nit);
5230 aff_combination_convert (&nit, steptype);
5231 aff_combination_mult (&nit, &step, &delta);
5232 if (after_adjust)
5233 aff_combination_add (&delta, &step);
5234
5235 tree_to_aff_combination (iv->base, type, val);
5236 if (!POINTER_TYPE_P (type))
5237 aff_combination_convert (val, steptype);
5238 aff_combination_add (val, &delta);
5239 }
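/* As a concrete example (arbitrary values): for a candidate with base 16 and
   step 4 and DESC->NITER equal to 10, the value computed is 16 + 10 * 4 = 56
   when AT precedes the increment, and 16 + 10 * 4 + 4 = 60 when AT follows
   it.  */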
5240
5241 /* Returns period of induction variable iv. */
5242
5243 static tree
5244 iv_period (struct iv *iv)
5245 {
5246 tree step = iv->step, period, type;
5247 tree pow2div;
5248
5249 gcc_assert (step && TREE_CODE (step) == INTEGER_CST);
5250
5251 type = unsigned_type_for (TREE_TYPE (step));
5252 /* Period of the iv is lcm (step, type_range)/step -1,
5253 i.e., N*type_range/step - 1. Since the type range is a power
5254 of two, N == step >> num_of_ending_zeros_binary (step),
5255 so the final result is
5256
5257 (type_range >> num_of_ending_zeros_binary (step)) - 1
5258
5259 */
5260 pow2div = num_ending_zeros (step);
5261
5262 period = build_low_bits_mask (type,
5263 (TYPE_PRECISION (type)
5264 - tree_to_uhwi (pow2div)));
5265
5266 return period;
5267 }
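/* Worked example (hypothetical values): for a 32-bit unsigned iv with step
   12, num_ending_zeros (12) is 2, so the period is
   (2^32 >> 2) - 1 = 0x3fffffff, i.e. the low-bits mask of 32 - 2 = 30
   bits built above.  */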
5268
5269 /* Returns the comparison operator used when eliminating the iv USE. */
5270
5271 static enum tree_code
5272 iv_elimination_compare (struct ivopts_data *data, struct iv_use *use)
5273 {
5274 class loop *loop = data->current_loop;
5275 basic_block ex_bb;
5276 edge exit;
5277
5278 ex_bb = gimple_bb (use->stmt);
5279 exit = EDGE_SUCC (ex_bb, 0);
5280 if (flow_bb_inside_loop_p (loop, exit->dest))
5281 exit = EDGE_SUCC (ex_bb, 1);
5282
5283 return (exit->flags & EDGE_TRUE_VALUE ? EQ_EXPR : NE_EXPR);
5284 }
5285
5286 /* Returns true if we can prove that BASE - OFFSET does not overflow. For now,
5287 we only detect the situation that BASE = SOMETHING + OFFSET, where the
5288 calculation is performed in non-wrapping type.
5289
5290 TODO: More generally, we could test for the situation that
5291 BASE = SOMETHING + OFFSET' and OFFSET is between OFFSET' and zero.
5292 This would require knowing the sign of OFFSET. */
5293
5294 static bool
5295 difference_cannot_overflow_p (struct ivopts_data *data, tree base, tree offset)
5296 {
5297 enum tree_code code;
5298 tree e1, e2;
5299 aff_tree aff_e1, aff_e2, aff_offset;
5300
5301 if (!nowrap_type_p (TREE_TYPE (base)))
5302 return false;
5303
5304 base = expand_simple_operations (base);
5305
5306 if (TREE_CODE (base) == SSA_NAME)
5307 {
5308 gimple *stmt = SSA_NAME_DEF_STMT (base);
5309
5310 if (gimple_code (stmt) != GIMPLE_ASSIGN)
5311 return false;
5312
5313 code = gimple_assign_rhs_code (stmt);
5314 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
5315 return false;
5316
5317 e1 = gimple_assign_rhs1 (stmt);
5318 e2 = gimple_assign_rhs2 (stmt);
5319 }
5320 else
5321 {
5322 code = TREE_CODE (base);
5323 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
5324 return false;
5325 e1 = TREE_OPERAND (base, 0);
5326 e2 = TREE_OPERAND (base, 1);
5327 }
5328
5329 /* Use affine expansion as deeper inspection to prove the equality. */
5330 tree_to_aff_combination_expand (e2, TREE_TYPE (e2),
5331 &aff_e2, &data->name_expansion_cache);
5332 tree_to_aff_combination_expand (offset, TREE_TYPE (offset),
5333 &aff_offset, &data->name_expansion_cache);
5334 aff_combination_scale (&aff_offset, -1);
5335 switch (code)
5336 {
5337 case PLUS_EXPR:
5338 aff_combination_add (&aff_e2, &aff_offset);
5339 if (aff_combination_zero_p (&aff_e2))
5340 return true;
5341
5342 tree_to_aff_combination_expand (e1, TREE_TYPE (e1),
5343 &aff_e1, &data->name_expansion_cache);
5344 aff_combination_add (&aff_e1, &aff_offset);
5345 return aff_combination_zero_p (&aff_e1);
5346
5347 case POINTER_PLUS_EXPR:
5348 aff_combination_add (&aff_e2, &aff_offset);
5349 return aff_combination_zero_p (&aff_e2);
5350
5351 default:
5352 return false;
5353 }
5354 }
5355
5356 /* Tries to replace loop exit by one formulated in terms of a LT_EXPR
5357 comparison with CAND. NITER describes the number of iterations of
5358 the loops. If successful, the comparison in COMP_P is altered accordingly.
5359
5360 We aim to handle the following situation:
5361
5362 sometype *base, *p;
5363 int a, b, i;
5364
5365 i = a;
5366 p = p_0 = base + a;
5367
5368 do
5369 {
5370 bla (*p);
5371 p++;
5372 i++;
5373 }
5374 while (i < b);
5375
5376 Here, the number of iterations of the loop is (a + 1 > b) ? 0 : b - a - 1.
5377 We aim to optimize this to
5378
5379 p = p_0 = base + a;
5380 do
5381 {
5382 bla (*p);
5383 p++;
5384 }
5385 while (p < p_0 - a + b);
5386
5387 This preserves correctness, since the pointer arithmetic does not
5388 overflow. More precisely:
5389
5390 1) if a + 1 <= b, then p_0 - a + b is the final value of p, hence there is no
5391 overflow in computing it or the values of p.
5392 2) if a + 1 > b, then we need to verify that the expression p_0 - a does not
5393 overflow. To prove this, we use the fact that p_0 = base + a. */
5394
5395 static bool
5396 iv_elimination_compare_lt (struct ivopts_data *data,
5397 struct iv_cand *cand, enum tree_code *comp_p,
5398 class tree_niter_desc *niter)
5399 {
5400 tree cand_type, a, b, mbz, nit_type = TREE_TYPE (niter->niter), offset;
5401 class aff_tree nit, tmpa, tmpb;
5402 enum tree_code comp;
5403 HOST_WIDE_INT step;
5404
5405 /* We need to know that the candidate induction variable does not overflow.
5406 While more complex analysis may be used to prove this, for now just
5407 check that the variable appears in the original program and that it
5408 is computed in a type that guarantees no overflows. */
5409 cand_type = TREE_TYPE (cand->iv->base);
5410 if (cand->pos != IP_ORIGINAL || !nowrap_type_p (cand_type))
5411 return false;
5412
5413 /* Make sure that the loop iterates till the loop bound is hit, as otherwise
5414 the calculation of the BOUND could overflow, making the comparison
5415 invalid. */
5416 if (!data->loop_single_exit_p)
5417 return false;
5418
5419 /* We need to be able to decide whether candidate is increasing or decreasing
5420 in order to choose the right comparison operator. */
5421 if (!cst_and_fits_in_hwi (cand->iv->step))
5422 return false;
5423 step = int_cst_value (cand->iv->step);
5424
5425 /* Check that the number of iterations matches the expected pattern:
5426 a + 1 > b ? 0 : b - a - 1. */
5427 mbz = niter->may_be_zero;
5428 if (TREE_CODE (mbz) == GT_EXPR)
5429 {
5430 /* Handle a + 1 > b. */
5431 tree op0 = TREE_OPERAND (mbz, 0);
5432 if (TREE_CODE (op0) == PLUS_EXPR && integer_onep (TREE_OPERAND (op0, 1)))
5433 {
5434 a = TREE_OPERAND (op0, 0);
5435 b = TREE_OPERAND (mbz, 1);
5436 }
5437 else
5438 return false;
5439 }
5440 else if (TREE_CODE (mbz) == LT_EXPR)
5441 {
5442 tree op1 = TREE_OPERAND (mbz, 1);
5443
5444 /* Handle b < a + 1. */
5445 if (TREE_CODE (op1) == PLUS_EXPR && integer_onep (TREE_OPERAND (op1, 1)))
5446 {
5447 a = TREE_OPERAND (op1, 0);
5448 b = TREE_OPERAND (mbz, 0);
5449 }
5450 else
5451 return false;
5452 }
5453 else
5454 return false;
5455
5456 /* Expected number of iterations is B - A - 1. Check that it matches
5457 the actual number, i.e., that B - A - NITER = 1. */
5458 tree_to_aff_combination (niter->niter, nit_type, &nit);
5459 tree_to_aff_combination (fold_convert (nit_type, a), nit_type, &tmpa);
5460 tree_to_aff_combination (fold_convert (nit_type, b), nit_type, &tmpb);
5461 aff_combination_scale (&nit, -1);
5462 aff_combination_scale (&tmpa, -1);
5463 aff_combination_add (&tmpb, &tmpa);
5464 aff_combination_add (&tmpb, &nit);
5465 if (tmpb.n != 0 || maybe_ne (tmpb.offset, 1))
5466 return false;
5467
5468 /* Finally, check that CAND->IV->BASE - CAND->IV->STEP * A does not
5469 overflow. */
5470 offset = fold_build2 (MULT_EXPR, TREE_TYPE (cand->iv->step),
5471 cand->iv->step,
5472 fold_convert (TREE_TYPE (cand->iv->step), a));
5473 if (!difference_cannot_overflow_p (data, cand->iv->base, offset))
5474 return false;
5475
5476 /* Determine the new comparison operator. */
5477 comp = step < 0 ? GT_EXPR : LT_EXPR;
5478 if (*comp_p == NE_EXPR)
5479 *comp_p = comp;
5480 else if (*comp_p == EQ_EXPR)
5481 *comp_p = invert_tree_comparison (comp, false);
5482 else
5483 gcc_unreachable ();
5484
5485 return true;
5486 }
5487
5488 /* Check whether it is possible to express the condition in USE by comparison
5489 of candidate CAND. If so, store the value compared with to BOUND, and the
5490 comparison operator to COMP. */
5491
5492 static bool
5493 may_eliminate_iv (struct ivopts_data *data,
5494 struct iv_use *use, struct iv_cand *cand, tree *bound,
5495 enum tree_code *comp)
5496 {
5497 basic_block ex_bb;
5498 edge exit;
5499 tree period;
5500 class loop *loop = data->current_loop;
5501 aff_tree bnd;
5502 class tree_niter_desc *desc = NULL;
5503
5504 if (TREE_CODE (cand->iv->step) != INTEGER_CST)
5505 return false;
5506
5507 /* For now this works only for exits that dominate the loop latch.
5508 TODO: extend to other conditions inside the loop body. */
5509 ex_bb = gimple_bb (use->stmt);
5510 if (use->stmt != last_stmt (ex_bb)
5511 || gimple_code (use->stmt) != GIMPLE_COND
5512 || !dominated_by_p (CDI_DOMINATORS, loop->latch, ex_bb))
5513 return false;
5514
5515 exit = EDGE_SUCC (ex_bb, 0);
5516 if (flow_bb_inside_loop_p (loop, exit->dest))
5517 exit = EDGE_SUCC (ex_bb, 1);
5518 if (flow_bb_inside_loop_p (loop, exit->dest))
5519 return false;
5520
5521 desc = niter_for_exit (data, exit);
5522 if (!desc)
5523 return false;
5524
5525 /* Determine whether we can use the variable to test the exit condition.
5526 This is the case iff the period of the induction variable is greater
5527 than the number of iterations for which the exit condition is true. */
5528 period = iv_period (cand->iv);
5529
5530 /* If the number of iterations is constant, compare against it directly. */
5531 if (TREE_CODE (desc->niter) == INTEGER_CST)
5532 {
5533 /* See cand_value_at. */
5534 if (stmt_after_increment (loop, cand, use->stmt))
5535 {
5536 if (!tree_int_cst_lt (desc->niter, period))
5537 return false;
5538 }
5539 else
5540 {
5541 if (tree_int_cst_lt (period, desc->niter))
5542 return false;
5543 }
5544 }
5545
5546 /* If not, and if this is the only possible exit of the loop, see whether
5547 we can get a conservative estimate on the number of iterations of the
5548 entire loop and compare against that instead. */
5549 else
5550 {
5551 widest_int period_value, max_niter;
5552
5553 max_niter = desc->max;
5554 if (stmt_after_increment (loop, cand, use->stmt))
5555 max_niter += 1;
5556 period_value = wi::to_widest (period);
5557 if (wi::gtu_p (max_niter, period_value))
5558 {
5559 /* See if we can take advantage of inferred loop bound
5560 information. */
5561 if (data->loop_single_exit_p)
5562 {
5563 if (!max_loop_iterations (loop, &max_niter))
5564 return false;
5565 /* The loop bound is already adjusted by adding 1. */
5566 if (wi::gtu_p (max_niter, period_value))
5567 return false;
5568 }
5569 else
5570 return false;
5571 }
5572 }
5573
5574 /* For a doloop IV candidate, the bound is zero. This is safe whether
5575 may_be_zero is set or not. */
5576 if (cand->doloop_p)
5577 {
5578 *bound = build_int_cst (TREE_TYPE (cand->iv->base), 0);
5579 *comp = iv_elimination_compare (data, use);
5580 return true;
5581 }
5582
5583 cand_value_at (loop, cand, use->stmt, desc, &bnd);
5584
5585 *bound = fold_convert (TREE_TYPE (cand->iv->base),
5586 aff_combination_to_tree (&bnd));
5587 *comp = iv_elimination_compare (data, use);
5588
5589 /* It is unlikely that computing the number of iterations using division
5590 would be more profitable than keeping the original induction variable. */
5591 if (expression_expensive_p (*bound))
5592 return false;
5593
5594 /* Sometimes it is possible to handle the situation that the number of
5595 iterations may be zero unless additional assumptions hold, by using <
5596 instead of != in the exit condition.
5597
5598 TODO: we could also calculate the value MAY_BE_ZERO ? 0 : NITER and
5599 base the exit condition on it. However, that is often too
5600 expensive. */
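/* As a rough illustration, for a source loop like

     for (i = a + 1; i < b; i++)
       ...

   the niter analysis describes the iteration count as b - a - 1 with
   may_be_zero being a + 1 > b. Rewriting the exit test of a replacement
   candidate as CAND != BOUND is only guaranteed to behave correctly when
   the body runs at least once; iv_elimination_compare_lt above checks
   whether < (or > for a negative step) can be used instead, so that the
   test stays correct also in the zero-iteration case. */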
5601 if (!integer_zerop (desc->may_be_zero))
5602 return iv_elimination_compare_lt (data, cand, comp, desc);
5603
5604 return true;
5605 }
5606
5607 /* Calculates the cost of BOUND, if it is a PARM_DECL. A PARM_DECL must
5608 be copied, if it is used in the loop body and DATA->body_includes_call. */
5609
5610 static int
5611 parm_decl_cost (struct ivopts_data *data, tree bound)
5612 {
5613 tree sbound = bound;
5614 STRIP_NOPS (sbound);
5615
5616 if (TREE_CODE (sbound) == SSA_NAME
5617 && SSA_NAME_IS_DEFAULT_DEF (sbound)
5618 && TREE_CODE (SSA_NAME_VAR (sbound)) == PARM_DECL
5619 && data->body_includes_call)
5620 return COSTS_N_INSNS (1);
5621
5622 return 0;
5623 }
5624
5625 /* Determines cost of computing the use in GROUP with CAND in a condition. */
5626
5627 static bool
5628 determine_group_iv_cost_cond (struct ivopts_data *data,
5629 struct iv_group *group, struct iv_cand *cand)
5630 {
5631 tree bound = NULL_TREE;
5632 struct iv *cmp_iv;
5633 bitmap inv_exprs = NULL;
5634 bitmap inv_vars_elim = NULL, inv_vars_express = NULL, inv_vars;
5635 comp_cost elim_cost = infinite_cost, express_cost, cost, bound_cost;
5636 enum comp_iv_rewrite rewrite_type;
5637 iv_inv_expr_ent *inv_expr_elim = NULL, *inv_expr_express = NULL, *inv_expr;
5638 tree *control_var, *bound_cst;
5639 enum tree_code comp = ERROR_MARK;
5640 struct iv_use *use = group->vuses[0];
5641
5642 /* Extract condition operands. */
5643 rewrite_type = extract_cond_operands (data, use->stmt, &control_var,
5644 &bound_cst, NULL, &cmp_iv);
5645 gcc_assert (rewrite_type != COMP_IV_NA);
5646
5647 /* Try iv elimination. */
5648 if (rewrite_type == COMP_IV_ELIM
5649 && may_eliminate_iv (data, use, cand, &bound, &comp))
5650 {
5651 elim_cost = force_var_cost (data, bound, &inv_vars_elim);
5652 if (elim_cost.cost == 0)
5653 elim_cost.cost = parm_decl_cost (data, bound);
5654 else if (TREE_CODE (bound) == INTEGER_CST)
5655 elim_cost.cost = 0;
5656 /* If we replace a loop condition 'i < n' with 'p < base + n',
5657 inv_vars_elim will have 'base' and 'n' set, which implies that both
5658 'base' and 'n' will be live during the loop. More likely,
5659 'base + n' will be loop invariant, resulting in only one live value
5660 during the loop. So in that case we clear inv_vars_elim and set
5661 inv_expr_elim instead. */
5662 if (inv_vars_elim && bitmap_count_bits (inv_vars_elim) > 1)
5663 {
5664 inv_expr_elim = get_loop_invariant_expr (data, bound);
5665 bitmap_clear (inv_vars_elim);
5666 }
5667 /* The bound is a loop invariant, so it will be only computed
5668 once. */
5669 elim_cost.cost = adjust_setup_cost (data, elim_cost.cost);
5670 }
5671
5672 /* When the condition is a comparison of the candidate IV against
5673 zero, prefer this IV.
5674
5675 TODO: The constant that we're subtracting from the cost should
5676 be target-dependent. This information should be added to the
5677 target costs for each backend. */
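/* For example, many targets can test 'iv != 0' essentially for free (the
   flags are set by the decrement itself, or a decrement-and-branch pattern
   exists), so when the existing exit test already compares the candidate
   against zero, the candidate gets a small bonus here. */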
5678 if (!elim_cost.infinite_cost_p () /* Do not try to decrease infinite! */
5679 && integer_zerop (*bound_cst)
5680 && (operand_equal_p (*control_var, cand->var_after, 0)
5681 || operand_equal_p (*control_var, cand->var_before, 0)))
5682 elim_cost -= 1;
5683
5684 express_cost = get_computation_cost (data, use, cand, false,
5685 &inv_vars_express, NULL,
5686 &inv_expr_express);
5687 if (cmp_iv != NULL)
5688 find_inv_vars (data, &cmp_iv->base, &inv_vars_express);
5689
5690 /* Count the cost of the original bound as well. */
5691 bound_cost = force_var_cost (data, *bound_cst, NULL);
5692 if (bound_cost.cost == 0)
5693 bound_cost.cost = parm_decl_cost (data, *bound_cst);
5694 else if (TREE_CODE (*bound_cst) == INTEGER_CST)
5695 bound_cost.cost = 0;
5696 express_cost += bound_cost;
5697
5698 /* Choose the better approach, preferring the eliminated IV. */
5699 if (elim_cost <= express_cost)
5700 {
5701 cost = elim_cost;
5702 inv_vars = inv_vars_elim;
5703 inv_vars_elim = NULL;
5704 inv_expr = inv_expr_elim;
5705 /* For doloop candidate/use pair, adjust to zero cost. */
5706 if (group->doloop_p && cand->doloop_p && elim_cost.cost > no_cost.cost)
5707 cost = no_cost;
5708 }
5709 else
5710 {
5711 cost = express_cost;
5712 inv_vars = inv_vars_express;
5713 inv_vars_express = NULL;
5714 bound = NULL_TREE;
5715 comp = ERROR_MARK;
5716 inv_expr = inv_expr_express;
5717 }
5718
5719 if (inv_expr)
5720 {
5721 inv_exprs = BITMAP_ALLOC (NULL);
5722 bitmap_set_bit (inv_exprs, inv_expr->id);
5723 }
5724 set_group_iv_cost (data, group, cand, cost,
5725 inv_vars, bound, comp, inv_exprs);
5726
5727 if (inv_vars_elim)
5728 BITMAP_FREE (inv_vars_elim);
5729 if (inv_vars_express)
5730 BITMAP_FREE (inv_vars_express);
5731
5732 return !cost.infinite_cost_p ();
5733 }
5734
5735 /* Determines cost of computing uses in GROUP with CAND. Returns false
5736 if the uses in GROUP cannot be represented with CAND. */
5737
5738 static bool
5739 determine_group_iv_cost (struct ivopts_data *data,
5740 struct iv_group *group, struct iv_cand *cand)
5741 {
5742 switch (group->type)
5743 {
5744 case USE_NONLINEAR_EXPR:
5745 return determine_group_iv_cost_generic (data, group, cand);
5746
5747 case USE_REF_ADDRESS:
5748 case USE_PTR_ADDRESS:
5749 return determine_group_iv_cost_address (data, group, cand);
5750
5751 case USE_COMPARE:
5752 return determine_group_iv_cost_cond (data, group, cand);
5753
5754 default:
5755 gcc_unreachable ();
5756 }
5757 }
5758
5759 /* Return true if get_computation_cost indicates that autoincrement is
5760 a possibility for the pair of USE and CAND, false otherwise. */
5761
5762 static bool
5763 autoinc_possible_for_pair (struct ivopts_data *data, struct iv_use *use,
5764 struct iv_cand *cand)
5765 {
5766 if (!address_p (use->type))
5767 return false;
5768
5769 bool can_autoinc = false;
5770 get_computation_cost (data, use, cand, true, NULL, &can_autoinc, NULL);
5771 return can_autoinc;
5772 }
5773
5774 /* Examine IP_ORIGINAL candidates to see if they are incremented next to a
5775 use that allows autoincrement, and set their AINC_USE if possible. */
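/* For instance, on a target with post-increment addressing, an original
   biv whose increment directly follows a load from the same pointer may be
   expressible as a single post-increment access (roughly '*p++' at the RTL
   level), making the increment effectively free; AINC_USE records the use
   that enables this. */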
5776
5777 static void
5778 set_autoinc_for_original_candidates (struct ivopts_data *data)
5779 {
5780 unsigned i, j;
5781
5782 for (i = 0; i < data->vcands.length (); i++)
5783 {
5784 struct iv_cand *cand = data->vcands[i];
5785 struct iv_use *closest_before = NULL;
5786 struct iv_use *closest_after = NULL;
5787 if (cand->pos != IP_ORIGINAL)
5788 continue;
5789
5790 for (j = 0; j < data->vgroups.length (); j++)
5791 {
5792 struct iv_group *group = data->vgroups[j];
5793 struct iv_use *use = group->vuses[0];
5794 unsigned uid = gimple_uid (use->stmt);
5795
5796 if (gimple_bb (use->stmt) != gimple_bb (cand->incremented_at))
5797 continue;
5798
5799 if (uid < gimple_uid (cand->incremented_at)
5800 && (closest_before == NULL
5801 || uid > gimple_uid (closest_before->stmt)))
5802 closest_before = use;
5803
5804 if (uid > gimple_uid (cand->incremented_at)
5805 && (closest_after == NULL
5806 || uid < gimple_uid (closest_after->stmt)))
5807 closest_after = use;
5808 }
5809
5810 if (closest_before != NULL
5811 && autoinc_possible_for_pair (data, closest_before, cand))
5812 cand->ainc_use = closest_before;
5813 else if (closest_after != NULL
5814 && autoinc_possible_for_pair (data, closest_after, cand))
5815 cand->ainc_use = closest_after;
5816 }
5817 }
5818
5819 /* Relate compare use with all candidates. */
5820
5821 static void
5822 relate_compare_use_with_all_cands (struct ivopts_data *data)
5823 {
5824 unsigned i, count = data->vcands.length ();
5825 for (i = 0; i < data->vgroups.length (); i++)
5826 {
5827 struct iv_group *group = data->vgroups[i];
5828
5829 if (group->type == USE_COMPARE)
5830 bitmap_set_range (group->related_cands, 0, count);
5831 }
5832 }
5833
5834 /* If PREFERRED_MODE is suitable and profitable, use it to compute the
5835 doloop iv base from niter: base = niter + 1. */
5836
5837 static tree
5838 compute_doloop_base_on_mode (machine_mode preferred_mode, tree niter,
5839 const widest_int &iterations_max)
5840 {
5841 tree ntype = TREE_TYPE (niter);
5842 tree pref_type = lang_hooks.types.type_for_mode (preferred_mode, 1);
5843 if (!pref_type)
5844 return fold_build2 (PLUS_EXPR, ntype, unshare_expr (niter),
5845 build_int_cst (ntype, 1));
5846
5847 gcc_assert (TREE_CODE (pref_type) == INTEGER_TYPE);
5848
5849 int prec = TYPE_PRECISION (ntype);
5850 int pref_prec = TYPE_PRECISION (pref_type);
5851
5852 tree base;
5853
5854 /* Check if PREFERRED_MODE is able to represent niter. */
5855 if (pref_prec > prec
5856 || wi::ltu_p (iterations_max,
5857 widest_int::from (wi::max_value (pref_prec, UNSIGNED),
5858 UNSIGNED)))
5859 {
5860 /* No wrap; it is safe to use the preferred type after computing niter + 1. */
5861 if (wi::ltu_p (iterations_max,
5862 widest_int::from (wi::max_value (prec, UNSIGNED),
5863 UNSIGNED)))
5864 {
5865 /* This can help optimize the "-1 +1" pair when niter looks
5866 like "n - 1" (n is in the original mode): "base = (n - 1) + 1"
5867 in PREFERRED_MODE can simplify to base = (PREFERRED_TYPE)n. */
5868 base = fold_build2 (PLUS_EXPR, ntype, unshare_expr (niter),
5869 build_int_cst (ntype, 1));
5870 base = fold_convert (pref_type, base);
5871 }
5872
5873 /* To avoid wrapping, convert niter to the preferred type before adding 1. */
5874 else
5875 {
5876 niter = fold_convert (pref_type, niter);
5877 base = fold_build2 (PLUS_EXPR, pref_type, unshare_expr (niter),
5878 build_int_cst (pref_type, 1));
5879 }
5880 }
5881 else
5882 base = fold_build2 (PLUS_EXPR, ntype, unshare_expr (niter),
5883 build_int_cst (ntype, 1));
5884 return base;
5885 }
5886
5887 /* Add one doloop dedicated IV candidate:
5888 - Base is (may_be_zero ? 1 : (niter + 1)).
5889 - Step is -1. */
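/* For instance, if the analyzed iteration count is n - 1 (the body runs n
   times), the dedicated candidate starts at n and counts n, n - 1, ..., 1,
   reaching zero exactly when the loop should exit -- the shape a hardware
   count register expects. */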
5890
5891 static void
5892 add_iv_candidate_for_doloop (struct ivopts_data *data)
5893 {
5894 tree_niter_desc *niter_desc = niter_for_single_dom_exit (data);
5895 gcc_assert (niter_desc && niter_desc->assumptions);
5896
5897 tree niter = niter_desc->niter;
5898 tree ntype = TREE_TYPE (niter);
5899 gcc_assert (TREE_CODE (ntype) == INTEGER_TYPE);
5900
5901 tree may_be_zero = niter_desc->may_be_zero;
5902 if (may_be_zero && integer_zerop (may_be_zero))
5903 may_be_zero = NULL_TREE;
5904 if (may_be_zero)
5905 {
5906 if (COMPARISON_CLASS_P (may_be_zero))
5907 {
5908 niter = fold_build3 (COND_EXPR, ntype, may_be_zero,
5909 build_int_cst (ntype, 0),
5910 rewrite_to_non_trapping_overflow (niter));
5911 }
5912 /* Don't try to obtain the iteration count expression when may_be_zero is
5913 integer_nonzerop (the iteration count is then actually one) or any other non-comparison form. */
5914 else
5915 return;
5916 }
5917
5918 machine_mode mode = TYPE_MODE (ntype);
5919 machine_mode pref_mode = targetm.preferred_doloop_mode (mode);
5920
5921 tree base;
5922 if (mode != pref_mode)
5923 {
5924 base = compute_doloop_base_on_mode (pref_mode, niter, niter_desc->max);
5925 ntype = TREE_TYPE (base);
5926 }
5927 else
5928 base = fold_build2 (PLUS_EXPR, ntype, unshare_expr (niter),
5929 build_int_cst (ntype, 1));
5930
5932 add_candidate (data, base, build_int_cst (ntype, -1), true, NULL, NULL, true);
5933 }
5934
5935 /* Finds the candidates for the induction variables. */
5936
5937 static void
5938 find_iv_candidates (struct ivopts_data *data)
5939 {
5940 /* Add commonly used ivs. */
5941 add_standard_iv_candidates (data);
5942
5943 /* Add doloop dedicated ivs. */
5944 if (data->doloop_use_p)
5945 add_iv_candidate_for_doloop (data);
5946
5947 /* Add old induction variables. */
5948 add_iv_candidate_for_bivs (data);
5949
5950 /* Add induction variables derived from uses. */
5951 add_iv_candidate_for_groups (data);
5952
5953 set_autoinc_for_original_candidates (data);
5954
5955 /* Record the important candidates. */
5956 record_important_candidates (data);
5957
5958 /* Relate compare iv_use with all candidates. */
5959 if (!data->consider_all_candidates)
5960 relate_compare_use_with_all_cands (data);
5961
5962 if (dump_file && (dump_flags & TDF_DETAILS))
5963 {
5964 unsigned i;
5965
5966 fprintf (dump_file, "\n<Important Candidates>:\t");
5967 for (i = 0; i < data->vcands.length (); i++)
5968 if (data->vcands[i]->important)
5969 fprintf (dump_file, " %d,", data->vcands[i]->id);
5970 fprintf (dump_file, "\n");
5971
5972 fprintf (dump_file, "\n<Group, Cand> Related:\n");
5973 for (i = 0; i < data->vgroups.length (); i++)
5974 {
5975 struct iv_group *group = data->vgroups[i];
5976
5977 if (group->related_cands)
5978 {
5979 fprintf (dump_file, " Group %d:\t", group->id);
5980 dump_bitmap (dump_file, group->related_cands);
5981 }
5982 }
5983 fprintf (dump_file, "\n");
5984 }
5985 }
5986
5987 /* Determines costs of computing use of iv with an iv candidate. */
5988
5989 static void
5990 determine_group_iv_costs (struct ivopts_data *data)
5991 {
5992 unsigned i, j;
5993 struct iv_cand *cand;
5994 struct iv_group *group;
5995 bitmap to_clear = BITMAP_ALLOC (NULL);
5996
5997 alloc_use_cost_map (data);
5998
5999 for (i = 0; i < data->vgroups.length (); i++)
6000 {
6001 group = data->vgroups[i];
6002
6003 if (data->consider_all_candidates)
6004 {
6005 for (j = 0; j < data->vcands.length (); j++)
6006 {
6007 cand = data->vcands[j];
6008 determine_group_iv_cost (data, group, cand);
6009 }
6010 }
6011 else
6012 {
6013 bitmap_iterator bi;
6014
6015 EXECUTE_IF_SET_IN_BITMAP (group->related_cands, 0, j, bi)
6016 {
6017 cand = data->vcands[j];
6018 if (!determine_group_iv_cost (data, group, cand))
6019 bitmap_set_bit (to_clear, j);
6020 }
6021
6022 /* Remove the candidates for which the cost is infinite from
6023 the list of related candidates. */
6024 bitmap_and_compl_into (group->related_cands, to_clear);
6025 bitmap_clear (to_clear);
6026 }
6027 }
6028
6029 BITMAP_FREE (to_clear);
6030
6031 if (dump_file && (dump_flags & TDF_DETAILS))
6032 {
6033 bitmap_iterator bi;
6034
6035 /* Dump invariant variables. */
6036 fprintf (dump_file, "\n<Invariant Vars>:\n");
6037 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
6038 {
6039 struct version_info *info = ver_info (data, i);
6040 if (info->inv_id)
6041 {
6042 fprintf (dump_file, "Inv %d:\t", info->inv_id);
6043 print_generic_expr (dump_file, info->name, TDF_SLIM);
6044 fprintf (dump_file, "%s\n",
6045 info->has_nonlin_use ? "" : "\t(eliminable)");
6046 }
6047 }
6048
6049 /* Dump invariant expressions. */
6050 fprintf (dump_file, "\n<Invariant Expressions>:\n");
6051 auto_vec <iv_inv_expr_ent *> list (data->inv_expr_tab->elements ());
6052
6053 for (hash_table<iv_inv_expr_hasher>::iterator it
6054 = data->inv_expr_tab->begin (); it != data->inv_expr_tab->end ();
6055 ++it)
6056 list.safe_push (*it);
6057
6058 list.qsort (sort_iv_inv_expr_ent);
6059
6060 for (i = 0; i < list.length (); ++i)
6061 {
6062 fprintf (dump_file, "inv_expr %d: \t", list[i]->id);
6063 print_generic_expr (dump_file, list[i]->expr, TDF_SLIM);
6064 fprintf (dump_file, "\n");
6065 }
6066
6067 fprintf (dump_file, "\n<Group-candidate Costs>:\n");
6068
6069 for (i = 0; i < data->vgroups.length (); i++)
6070 {
6071 group = data->vgroups[i];
6072
6073 fprintf (dump_file, "Group %d:\n", i);
6074 fprintf (dump_file, " cand\tcost\tcompl.\tinv.expr.\tinv.vars\n");
6075 for (j = 0; j < group->n_map_members; j++)
6076 {
6077 if (!group->cost_map[j].cand
6078 || group->cost_map[j].cost.infinite_cost_p ())
6079 continue;
6080
6081 fprintf (dump_file, " %d\t%" PRId64 "\t%d\t",
6082 group->cost_map[j].cand->id,
6083 group->cost_map[j].cost.cost,
6084 group->cost_map[j].cost.complexity);
6085 if (!group->cost_map[j].inv_exprs
6086 || bitmap_empty_p (group->cost_map[j].inv_exprs))
6087 fprintf (dump_file, "NIL;\t");
6088 else
6089 bitmap_print (dump_file,
6090 group->cost_map[j].inv_exprs, "", ";\t");
6091 if (!group->cost_map[j].inv_vars
6092 || bitmap_empty_p (group->cost_map[j].inv_vars))
6093 fprintf (dump_file, "NIL;\n");
6094 else
6095 bitmap_print (dump_file,
6096 group->cost_map[j].inv_vars, "", "\n");
6097 }
6098
6099 fprintf (dump_file, "\n");
6100 }
6101 fprintf (dump_file, "\n");
6102 }
6103 }
6104
6105 /* Determines cost of the candidate CAND. */
6106
6107 static void
6108 determine_iv_cost (struct ivopts_data *data, struct iv_cand *cand)
6109 {
6110 comp_cost cost_base;
6111 int64_t cost, cost_step;
6112 tree base;
6113
6114 gcc_assert (cand->iv != NULL);
6115
6116 /* There are two costs associated with the candidate -- its increment
6117 and its initialization. The second is almost negligible for any loop
6118 that rolls enough, so we give it only a very small weight. */
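/* Schematically, the cost computed below is the per-iteration increment
   cost (zero for a doloop candidate) plus the one-time setup cost scaled
   down by adjust_setup_cost, which roughly amortizes it over the expected
   number of iterations, so an expensive initialization matters little for
   loops that roll many times. */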
6119
6120 base = cand->iv->base;
6121 cost_base = force_var_cost (data, base, NULL);
6122 /* It will be exceptional that the iv register happens to be initialized with
6123 the proper value at no cost. In general, there will at least be a regcopy
6124 or a const set. */
6125 if (cost_base.cost == 0)
6126 cost_base.cost = COSTS_N_INSNS (1);
6127 /* Doloop decrement should be considered as zero cost. */
6128 if (cand->doloop_p)
6129 cost_step = 0;
6130 else
6131 cost_step = add_cost (data->speed, TYPE_MODE (TREE_TYPE (base)));
6132 cost = cost_step + adjust_setup_cost (data, cost_base.cost);
6133
6134 /* Prefer the original ivs unless we may gain something by replacing them.
6135 The reason is to make debugging simpler; so this is not relevant for
6136 artificial ivs created by other optimization passes. */
6137 if ((cand->pos != IP_ORIGINAL
6138 || !SSA_NAME_VAR (cand->var_before)
6139 || DECL_ARTIFICIAL (SSA_NAME_VAR (cand->var_before)))
6140 /* Prefer doloop as well. */
6141 && !cand->doloop_p)
6142 cost++;
6143
6144 /* Prefer not to insert statements into latch unless there are some
6145 already (so that we do not create unnecessary jumps). */
6146 if (cand->pos == IP_END
6147 && empty_block_p (ip_end_pos (data->current_loop)))
6148 cost++;
6149
6150 cand->cost = cost;
6151 cand->cost_step = cost_step;
6152 }
6153
6154 /* Determines costs of computation of the candidates. */
6155
6156 static void
6157 determine_iv_costs (struct ivopts_data *data)
6158 {
6159 unsigned i;
6160
6161 if (dump_file && (dump_flags & TDF_DETAILS))
6162 {
6163 fprintf (dump_file, "<Candidate Costs>:\n");
6164 fprintf (dump_file, " cand\tcost\n");
6165 }
6166
6167 for (i = 0; i < data->vcands.length (); i++)
6168 {
6169 struct iv_cand *cand = data->vcands[i];
6170
6171 determine_iv_cost (data, cand);
6172
6173 if (dump_file && (dump_flags & TDF_DETAILS))
6174 fprintf (dump_file, " %d\t%d\n", i, cand->cost);
6175 }
6176
6177 if (dump_file && (dump_flags & TDF_DETAILS))
6178 fprintf (dump_file, "\n");
6179 }
6180
6181 /* Estimate register pressure for a loop having N_INVS invariants and N_CANDS
6182 induction variables. Note N_INVS includes both invariant variables and
6183 invariant expressions. */
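/* A made-up numeric example of the regimes below: with 14 available
   registers, 2 reserved (target_res_regs), 6 registers already used and 4
   new values (1 invariant + 3 candidates), regs_needed is 10 and
   10 + 2 < 14, so the estimate is simply n_new + n_cands = 4 + 3 = 7. If
   instead 16 registers were needed while only 5 of them were candidates,
   the 2 registers beyond the 14 available would each be charged
   target_spill_cost on top of target_reg_cost for those that fit, plus the
   final n_cands term. */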
6184
6185 static unsigned
6186 ivopts_estimate_reg_pressure (struct ivopts_data *data, unsigned n_invs,
6187 unsigned n_cands)
6188 {
6189 unsigned cost;
6190 unsigned n_old = data->regs_used, n_new = n_invs + n_cands;
6191 unsigned regs_needed = n_new + n_old, available_regs = target_avail_regs;
6192 bool speed = data->speed;
6193
6194 /* If there is a call in the loop body, the call-clobbered registers
6195 are not available for loop invariants. */
6196 if (data->body_includes_call)
6197 available_regs = available_regs - target_clobbered_regs;
6198
6199 /* If we have enough registers. */
6200 if (regs_needed + target_res_regs < available_regs)
6201 cost = n_new;
6202 /* If close to running out of registers, try to preserve them. */
6203 else if (regs_needed <= available_regs)
6204 cost = target_reg_cost [speed] * regs_needed;
6205 /* If the registers needed exceed those available but the number of
6206 candidates does not, we penalize the extra registers using target_spill_cost. */
6207 else if (n_cands <= available_regs)
6208 cost = target_reg_cost [speed] * available_regs
6209 + target_spill_cost [speed] * (regs_needed - available_regs);
6210 /* If the number of candidates alone exceeds the available registers, we
6211 penalize the extra candidate registers using target_spill_cost * 2, because
6212 it is more expensive to spill an induction variable than an invariant. */
6213 else
6214 cost = target_reg_cost [speed] * available_regs
6215 + target_spill_cost [speed] * (n_cands - available_regs) * 2
6216 + target_spill_cost [speed] * (regs_needed - n_cands);
6217
6218 /* Finally, add the number of candidates, so that we prefer eliminating
6219 induction variables if possible. */
6220 return cost + n_cands;
6221 }
6222
6223 /* For each size of the induction variable set determine the penalty. */
6224
6225 static void
6226 determine_set_costs (struct ivopts_data *data)
6227 {
6228 unsigned j, n;
6229 gphi *phi;
6230 gphi_iterator psi;
6231 tree op;
6232 class loop *loop = data->current_loop;
6233 bitmap_iterator bi;
6234
6235 if (dump_file && (dump_flags & TDF_DETAILS))
6236 {
6237 fprintf (dump_file, "<Global Costs>:\n");
6238 fprintf (dump_file, " target_avail_regs %d\n", target_avail_regs);
6239 fprintf (dump_file, " target_clobbered_regs %d\n", target_clobbered_regs);
6240 fprintf (dump_file, " target_reg_cost %d\n", target_reg_cost[data->speed]);
6241 fprintf (dump_file, " target_spill_cost %d\n", target_spill_cost[data->speed]);
6242 }
6243
6244 n = 0;
6245 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
6246 {
6247 phi = psi.phi ();
6248 op = PHI_RESULT (phi);
6249
6250 if (virtual_operand_p (op))
6251 continue;
6252
6253 if (get_iv (data, op))
6254 continue;
6255
6256 if (!POINTER_TYPE_P (TREE_TYPE (op))
6257 && !INTEGRAL_TYPE_P (TREE_TYPE (op)))
6258 continue;
6259
6260 n++;
6261 }
6262
6263 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, j, bi)
6264 {
6265 struct version_info *info = ver_info (data, j);
6266
6267 if (info->inv_id && info->has_nonlin_use)
6268 n++;
6269 }
6270
6271 data->regs_used = n;
6272 if (dump_file && (dump_flags & TDF_DETAILS))
6273 fprintf (dump_file, " regs_used %d\n", n);
6274
6275 if (dump_file && (dump_flags & TDF_DETAILS))
6276 {
6277 fprintf (dump_file, " cost for size:\n");
6278 fprintf (dump_file, " ivs\tcost\n");
6279 for (j = 0; j <= 2 * target_avail_regs; j++)
6280 fprintf (dump_file, " %d\t%d\n", j,
6281 ivopts_estimate_reg_pressure (data, 0, j));
6282 fprintf (dump_file, "\n");
6283 }
6284 }
6285
6286 /* Returns true if A is a cheaper cost pair than B. */
6287
6288 static bool
6289 cheaper_cost_pair (class cost_pair *a, class cost_pair *b)
6290 {
6291 if (!a)
6292 return false;
6293
6294 if (!b)
6295 return true;
6296
6297 if (a->cost < b->cost)
6298 return true;
6299
6300 if (b->cost < a->cost)
6301 return false;
6302
6303 /* In case the costs are the same, prefer the cheaper candidate. */
6304 if (a->cand->cost < b->cand->cost)
6305 return true;
6306
6307 return false;
6308 }
6309
6310 /* Compare if A is a more expensive cost pair than B. Return 1, 0 and -1
6311 for more expensive, equal and cheaper respectively. */
6312
6313 static int
6314 compare_cost_pair (class cost_pair *a, class cost_pair *b)
6315 {
6316 if (cheaper_cost_pair (a, b))
6317 return -1;
6318 if (cheaper_cost_pair (b, a))
6319 return 1;
6320
6321 return 0;
6322 }
6323
6324 /* Returns the cost pair by which GROUP is expressed in IVS. */
6325
6326 static class cost_pair *
6327 iv_ca_cand_for_group (class iv_ca *ivs, struct iv_group *group)
6328 {
6329 return ivs->cand_for_group[group->id];
6330 }
6331
6332 /* Computes the cost field of IVS structure. */
6333
6334 static void
6335 iv_ca_recount_cost (struct ivopts_data *data, class iv_ca *ivs)
6336 {
6337 comp_cost cost = ivs->cand_use_cost;
6338
6339 cost += ivs->cand_cost;
6340 cost += ivopts_estimate_reg_pressure (data, ivs->n_invs, ivs->n_cands);
6341 ivs->cost = cost;
6342 }
6343
6344 /* Remove uses of the invariants in set INVS by decreasing their counters in
6345 N_INV_USES and in IVS. */
6346
6347 static void
6348 iv_ca_set_remove_invs (class iv_ca *ivs, bitmap invs, unsigned *n_inv_uses)
6349 {
6350 bitmap_iterator bi;
6351 unsigned iid;
6352
6353 if (!invs)
6354 return;
6355
6356 gcc_assert (n_inv_uses != NULL);
6357 EXECUTE_IF_SET_IN_BITMAP (invs, 0, iid, bi)
6358 {
6359 n_inv_uses[iid]--;
6360 if (n_inv_uses[iid] == 0)
6361 ivs->n_invs--;
6362 }
6363 }
6364
6365 /* Set GROUP not to be expressed by any candidate in IVS. */
6366
6367 static void
6368 iv_ca_set_no_cp (struct ivopts_data *data, class iv_ca *ivs,
6369 struct iv_group *group)
6370 {
6371 unsigned gid = group->id, cid;
6372 class cost_pair *cp;
6373
6374 cp = ivs->cand_for_group[gid];
6375 if (!cp)
6376 return;
6377 cid = cp->cand->id;
6378
6379 ivs->bad_groups++;
6380 ivs->cand_for_group[gid] = NULL;
6381 ivs->n_cand_uses[cid]--;
6382
6383 if (ivs->n_cand_uses[cid] == 0)
6384 {
6385 bitmap_clear_bit (ivs->cands, cid);
6386 if (!cp->cand->doloop_p || !targetm.have_count_reg_decr_p)
6387 ivs->n_cands--;
6388 ivs->cand_cost -= cp->cand->cost;
6389 iv_ca_set_remove_invs (ivs, cp->cand->inv_vars, ivs->n_inv_var_uses);
6390 iv_ca_set_remove_invs (ivs, cp->cand->inv_exprs, ivs->n_inv_expr_uses);
6391 }
6392
6393 ivs->cand_use_cost -= cp->cost;
6394 iv_ca_set_remove_invs (ivs, cp->inv_vars, ivs->n_inv_var_uses);
6395 iv_ca_set_remove_invs (ivs, cp->inv_exprs, ivs->n_inv_expr_uses);
6396 iv_ca_recount_cost (data, ivs);
6397 }
6398
6399 /* Add uses of the invariants in set INVS by increasing their counters in
6400 N_INV_USES and in IVS. */
6401
6402 static void
6403 iv_ca_set_add_invs (class iv_ca *ivs, bitmap invs, unsigned *n_inv_uses)
6404 {
6405 bitmap_iterator bi;
6406 unsigned iid;
6407
6408 if (!invs)
6409 return;
6410
6411 gcc_assert (n_inv_uses != NULL);
6412 EXECUTE_IF_SET_IN_BITMAP (invs, 0, iid, bi)
6413 {
6414 n_inv_uses[iid]++;
6415 if (n_inv_uses[iid] == 1)
6416 ivs->n_invs++;
6417 }
6418 }
6419
6420 /* Set cost pair for GROUP in set IVS to CP. */
6421
6422 static void
6423 iv_ca_set_cp (struct ivopts_data *data, class iv_ca *ivs,
6424 struct iv_group *group, class cost_pair *cp)
6425 {
6426 unsigned gid = group->id, cid;
6427
6428 if (ivs->cand_for_group[gid] == cp)
6429 return;
6430
6431 if (ivs->cand_for_group[gid])
6432 iv_ca_set_no_cp (data, ivs, group);
6433
6434 if (cp)
6435 {
6436 cid = cp->cand->id;
6437
6438 ivs->bad_groups--;
6439 ivs->cand_for_group[gid] = cp;
6440 ivs->n_cand_uses[cid]++;
6441 if (ivs->n_cand_uses[cid] == 1)
6442 {
6443 bitmap_set_bit (ivs->cands, cid);
6444 if (!cp->cand->doloop_p || !targetm.have_count_reg_decr_p)
6445 ivs->n_cands++;
6446 ivs->cand_cost += cp->cand->cost;
6447 iv_ca_set_add_invs (ivs, cp->cand->inv_vars, ivs->n_inv_var_uses);
6448 iv_ca_set_add_invs (ivs, cp->cand->inv_exprs, ivs->n_inv_expr_uses);
6449 }
6450
6451 ivs->cand_use_cost += cp->cost;
6452 iv_ca_set_add_invs (ivs, cp->inv_vars, ivs->n_inv_var_uses);
6453 iv_ca_set_add_invs (ivs, cp->inv_exprs, ivs->n_inv_expr_uses);
6454 iv_ca_recount_cost (data, ivs);
6455 }
6456 }
6457
6458 /* Extend set IVS by expressing GROUP by some of the candidates in it
6459 if possible. Consider all important candidates if the candidates in
6460 set IVS don't give any result. */
6461
6462 static void
6463 iv_ca_add_group (struct ivopts_data *data, class iv_ca *ivs,
6464 struct iv_group *group)
6465 {
6466 class cost_pair *best_cp = NULL, *cp;
6467 bitmap_iterator bi;
6468 unsigned i;
6469 struct iv_cand *cand;
6470
6471 gcc_assert (ivs->upto >= group->id);
6472 ivs->upto++;
6473 ivs->bad_groups++;
6474
6475 EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
6476 {
6477 cand = data->vcands[i];
6478 cp = get_group_iv_cost (data, group, cand);
6479 if (cheaper_cost_pair (cp, best_cp))
6480 best_cp = cp;
6481 }
6482
6483 if (best_cp == NULL)
6484 {
6485 EXECUTE_IF_SET_IN_BITMAP (data->important_candidates, 0, i, bi)
6486 {
6487 cand = data->vcands[i];
6488 cp = get_group_iv_cost (data, group, cand);
6489 if (cheaper_cost_pair (cp, best_cp))
6490 best_cp = cp;
6491 }
6492 }
6493
6494 iv_ca_set_cp (data, ivs, group, best_cp);
6495 }
6496
6497 /* Get cost for assignment IVS. */
6498
6499 static comp_cost
6500 iv_ca_cost (class iv_ca *ivs)
6501 {
6502 /* This was a conditional expression but it triggered a bug in
6503 Sun C 5.5. */
6504 if (ivs->bad_groups)
6505 return infinite_cost;
6506 else
6507 return ivs->cost;
6508 }
6509
6510 /* Compare if applying NEW_CP to GROUP for IVS introduces more invariants
6511 than OLD_CP. Return 1, 0 and -1 for more, equal and fewer invariants
6512 respectively. */
6513
6514 static int
6515 iv_ca_compare_deps (struct ivopts_data *data, class iv_ca *ivs,
6516 struct iv_group *group, class cost_pair *old_cp,
6517 class cost_pair *new_cp)
6518 {
6519 gcc_assert (old_cp && new_cp && old_cp != new_cp);
6520 unsigned old_n_invs = ivs->n_invs;
6521 iv_ca_set_cp (data, ivs, group, new_cp);
6522 unsigned new_n_invs = ivs->n_invs;
6523 iv_ca_set_cp (data, ivs, group, old_cp);
6524
6525 return new_n_invs > old_n_invs ? 1 : (new_n_invs < old_n_invs ? -1 : 0);
6526 }
6527
6528 /* Creates change of expressing GROUP by NEW_CP instead of OLD_CP and chains
6529 it before NEXT. */
6530
6531 static struct iv_ca_delta *
6532 iv_ca_delta_add (struct iv_group *group, class cost_pair *old_cp,
6533 class cost_pair *new_cp, struct iv_ca_delta *next)
6534 {
6535 struct iv_ca_delta *change = XNEW (struct iv_ca_delta);
6536
6537 change->group = group;
6538 change->old_cp = old_cp;
6539 change->new_cp = new_cp;
6540 change->next = next;
6541
6542 return change;
6543 }
6544
6545 /* Joins two lists of changes L1 and L2. Destructive -- old lists
6546 are rewritten. */
6547
6548 static struct iv_ca_delta *
6549 iv_ca_delta_join (struct iv_ca_delta *l1, struct iv_ca_delta *l2)
6550 {
6551 struct iv_ca_delta *last;
6552
6553 if (!l2)
6554 return l1;
6555
6556 if (!l1)
6557 return l2;
6558
6559 for (last = l1; last->next; last = last->next)
6560 continue;
6561 last->next = l2;
6562
6563 return l1;
6564 }
6565
6566 /* Reverse the list of changes DELTA, forming the inverse to it. */
6567
6568 static struct iv_ca_delta *
6569 iv_ca_delta_reverse (struct iv_ca_delta *delta)
6570 {
6571 struct iv_ca_delta *act, *next, *prev = NULL;
6572
6573 for (act = delta; act; act = next)
6574 {
6575 next = act->next;
6576 act->next = prev;
6577 prev = act;
6578
6579 std::swap (act->old_cp, act->new_cp);
6580 }
6581
6582 return prev;
6583 }
6584
6585 /* Commit changes in DELTA to IVS. If FORWARD is false, the changes are
6586 reverted instead. */
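/* The usual pattern in the callers below: iv_ca_extend and iv_ca_narrow
   build a DELTA and report the cost of the would-be set while leaving IVS
   unchanged on return; the caller compares costs and only then commits the
   best delta for good. Committing with FORWARD = false undoes a delta
   that was applied earlier. */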
6587
6588 static void
6589 iv_ca_delta_commit (struct ivopts_data *data, class iv_ca *ivs,
6590 struct iv_ca_delta *delta, bool forward)
6591 {
6592 class cost_pair *from, *to;
6593 struct iv_ca_delta *act;
6594
6595 if (!forward)
6596 delta = iv_ca_delta_reverse (delta);
6597
6598 for (act = delta; act; act = act->next)
6599 {
6600 from = act->old_cp;
6601 to = act->new_cp;
6602 gcc_assert (iv_ca_cand_for_group (ivs, act->group) == from);
6603 iv_ca_set_cp (data, ivs, act->group, to);
6604 }
6605
6606 if (!forward)
6607 iv_ca_delta_reverse (delta);
6608 }
6609
6610 /* Returns true if CAND is used in IVS. */
6611
6612 static bool
6613 iv_ca_cand_used_p (class iv_ca *ivs, struct iv_cand *cand)
6614 {
6615 return ivs->n_cand_uses[cand->id] > 0;
6616 }
6617
6618 /* Returns number of induction variable candidates in the set IVS. */
6619
6620 static unsigned
6621 iv_ca_n_cands (class iv_ca *ivs)
6622 {
6623 return ivs->n_cands;
6624 }
6625
6626 /* Free the list of changes DELTA. */
6627
6628 static void
6629 iv_ca_delta_free (struct iv_ca_delta **delta)
6630 {
6631 struct iv_ca_delta *act, *next;
6632
6633 for (act = *delta; act; act = next)
6634 {
6635 next = act->next;
6636 free (act);
6637 }
6638
6639 *delta = NULL;
6640 }
6641
6642 /* Allocates new iv candidates assignment. */
6643
6644 static class iv_ca *
6645 iv_ca_new (struct ivopts_data *data)
6646 {
6647 class iv_ca *nw = XNEW (class iv_ca);
6648
6649 nw->upto = 0;
6650 nw->bad_groups = 0;
6651 nw->cand_for_group = XCNEWVEC (class cost_pair *,
6652 data->vgroups.length ());
6653 nw->n_cand_uses = XCNEWVEC (unsigned, data->vcands.length ());
6654 nw->cands = BITMAP_ALLOC (NULL);
6655 nw->n_cands = 0;
6656 nw->n_invs = 0;
6657 nw->cand_use_cost = no_cost;
6658 nw->cand_cost = 0;
6659 nw->n_inv_var_uses = XCNEWVEC (unsigned, data->max_inv_var_id + 1);
6660 nw->n_inv_expr_uses = XCNEWVEC (unsigned, data->max_inv_expr_id + 1);
6661 nw->cost = no_cost;
6662
6663 return nw;
6664 }
6665
6666 /* Free memory occupied by the set IVS. */
6667
6668 static void
6669 iv_ca_free (class iv_ca **ivs)
6670 {
6671 free ((*ivs)->cand_for_group);
6672 free ((*ivs)->n_cand_uses);
6673 BITMAP_FREE ((*ivs)->cands);
6674 free ((*ivs)->n_inv_var_uses);
6675 free ((*ivs)->n_inv_expr_uses);
6676 free (*ivs);
6677 *ivs = NULL;
6678 }
6679
6680 /* Dumps IVS to FILE. */
6681
6682 static void
6683 iv_ca_dump (struct ivopts_data *data, FILE *file, class iv_ca *ivs)
6684 {
6685 unsigned i;
6686 comp_cost cost = iv_ca_cost (ivs);
6687
6688 fprintf (file, " cost: %" PRId64 " (complexity %d)\n", cost.cost,
6689 cost.complexity);
6690 fprintf (file, " reg_cost: %d\n",
6691 ivopts_estimate_reg_pressure (data, ivs->n_invs, ivs->n_cands));
6692 fprintf (file, " cand_cost: %" PRId64 "\n cand_group_cost: "
6693 "%" PRId64 " (complexity %d)\n", ivs->cand_cost,
6694 ivs->cand_use_cost.cost, ivs->cand_use_cost.complexity);
6695 bitmap_print (file, ivs->cands, " candidates: ","\n");
6696
6697 for (i = 0; i < ivs->upto; i++)
6698 {
6699 struct iv_group *group = data->vgroups[i];
6700 class cost_pair *cp = iv_ca_cand_for_group (ivs, group);
6701 if (cp)
6702 fprintf (file, " group:%d --> iv_cand:%d, cost=("
6703 "%" PRId64 ",%d)\n", group->id, cp->cand->id,
6704 cp->cost.cost, cp->cost.complexity);
6705 else
6706 fprintf (file, " group:%d --> ??\n", group->id);
6707 }
6708
6709 const char *pref = "";
6710 fprintf (file, " invariant variables: ");
6711 for (i = 1; i <= data->max_inv_var_id; i++)
6712 if (ivs->n_inv_var_uses[i])
6713 {
6714 fprintf (file, "%s%d", pref, i);
6715 pref = ", ";
6716 }
6717
6718 pref = "";
6719 fprintf (file, "\n invariant expressions: ");
6720 for (i = 1; i <= data->max_inv_expr_id; i++)
6721 if (ivs->n_inv_expr_uses[i])
6722 {
6723 fprintf (file, "%s%d", pref, i);
6724 pref = ", ";
6725 }
6726
6727 fprintf (file, "\n\n");
6728 }
6729
6730 /* Try changing candidate in IVS to CAND for each use. Return cost of the
6731 new set, and store differences in DELTA. Number of induction variables
6732 in the new set is stored to N_IVS. MIN_NCAND is a flag. When it is true
6733 the function will try to find a solution with a minimal number of iv candidates. */
6734
6735 static comp_cost
6736 iv_ca_extend (struct ivopts_data *data, class iv_ca *ivs,
6737 struct iv_cand *cand, struct iv_ca_delta **delta,
6738 unsigned *n_ivs, bool min_ncand)
6739 {
6740 unsigned i;
6741 comp_cost cost;
6742 struct iv_group *group;
6743 class cost_pair *old_cp, *new_cp;
6744
6745 *delta = NULL;
6746 for (i = 0; i < ivs->upto; i++)
6747 {
6748 group = data->vgroups[i];
6749 old_cp = iv_ca_cand_for_group (ivs, group);
6750
6751 if (old_cp
6752 && old_cp->cand == cand)
6753 continue;
6754
6755 new_cp = get_group_iv_cost (data, group, cand);
6756 if (!new_cp)
6757 continue;
6758
6759 if (!min_ncand)
6760 {
6761 int cmp_invs = iv_ca_compare_deps (data, ivs, group, old_cp, new_cp);
6762 /* Skip if new_cp depends on more invariants. */
6763 if (cmp_invs > 0)
6764 continue;
6765
6766 int cmp_cost = compare_cost_pair (new_cp, old_cp);
6767 /* Skip if new_cp is not cheaper. */
6768 if (cmp_cost > 0 || (cmp_cost == 0 && cmp_invs == 0))
6769 continue;
6770 }
6771
6772 *delta = iv_ca_delta_add (group, old_cp, new_cp, *delta);
6773 }
6774
6775 iv_ca_delta_commit (data, ivs, *delta, true);
6776 cost = iv_ca_cost (ivs);
6777 if (n_ivs)
6778 *n_ivs = iv_ca_n_cands (ivs);
6779 iv_ca_delta_commit (data, ivs, *delta, false);
6780
6781 return cost;
6782 }
6783
6784 /* Try narrowing set IVS by removing CAND. Return the cost of
6785 the new set and store the differences in DELTA. START is
6786 the candidate with which we start narrowing. */
6787
6788 static comp_cost
6789 iv_ca_narrow (struct ivopts_data *data, class iv_ca *ivs,
6790 struct iv_cand *cand, struct iv_cand *start,
6791 struct iv_ca_delta **delta)
6792 {
6793 unsigned i, ci;
6794 struct iv_group *group;
6795 class cost_pair *old_cp, *new_cp, *cp;
6796 bitmap_iterator bi;
6797 struct iv_cand *cnd;
6798 comp_cost cost, best_cost, acost;
6799
6800 *delta = NULL;
6801 for (i = 0; i < data->vgroups.length (); i++)
6802 {
6803 group = data->vgroups[i];
6804
6805 old_cp = iv_ca_cand_for_group (ivs, group);
6806 if (old_cp->cand != cand)
6807 continue;
6808
6809 best_cost = iv_ca_cost (ivs);
6810 /* Start narrowing with START. */
6811 new_cp = get_group_iv_cost (data, group, start);
6812
6813 if (data->consider_all_candidates)
6814 {
6815 EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, ci, bi)
6816 {
6817 if (ci == cand->id || (start && ci == start->id))
6818 continue;
6819
6820 cnd = data->vcands[ci];
6821
6822 cp = get_group_iv_cost (data, group, cnd);
6823 if (!cp)
6824 continue;
6825
6826 iv_ca_set_cp (data, ivs, group, cp);
6827 acost = iv_ca_cost (ivs);
6828
6829 if (acost < best_cost)
6830 {
6831 best_cost = acost;
6832 new_cp = cp;
6833 }
6834 }
6835 }
6836 else
6837 {
6838 EXECUTE_IF_AND_IN_BITMAP (group->related_cands, ivs->cands, 0, ci, bi)
6839 {
6840 if (ci == cand->id || (start && ci == start->id))
6841 continue;
6842
6843 cnd = data->vcands[ci];
6844
6845 cp = get_group_iv_cost (data, group, cnd);
6846 if (!cp)
6847 continue;
6848
6849 iv_ca_set_cp (data, ivs, group, cp);
6850 acost = iv_ca_cost (ivs);
6851
6852 if (acost < best_cost)
6853 {
6854 best_cost = acost;
6855 new_cp = cp;
6856 }
6857 }
6858 }
6859 /* Restore to old cp for use. */
6860 iv_ca_set_cp (data, ivs, group, old_cp);
6861
6862 if (!new_cp)
6863 {
6864 iv_ca_delta_free (delta);
6865 return infinite_cost;
6866 }
6867
6868 *delta = iv_ca_delta_add (group, old_cp, new_cp, *delta);
6869 }
6870
6871 iv_ca_delta_commit (data, ivs, *delta, true);
6872 cost = iv_ca_cost (ivs);
6873 iv_ca_delta_commit (data, ivs, *delta, false);
6874
6875 return cost;
6876 }
6877
6878 /* Try optimizing the set of candidates IVS by removing candidates other
6879 than EXCEPT_CAND from it. Return cost of the new set, and store
6880 differences in DELTA. */
6881
6882 static comp_cost
6883 iv_ca_prune (struct ivopts_data *data, class iv_ca *ivs,
6884 struct iv_cand *except_cand, struct iv_ca_delta **delta)
6885 {
6886 bitmap_iterator bi;
6887 struct iv_ca_delta *act_delta, *best_delta;
6888 unsigned i;
6889 comp_cost best_cost, acost;
6890 struct iv_cand *cand;
6891
6892 best_delta = NULL;
6893 best_cost = iv_ca_cost (ivs);
6894
6895 EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
6896 {
6897 cand = data->vcands[i];
6898
6899 if (cand == except_cand)
6900 continue;
6901
6902 acost = iv_ca_narrow (data, ivs, cand, except_cand, &act_delta);
6903
6904 if (acost < best_cost)
6905 {
6906 best_cost = acost;
6907 iv_ca_delta_free (&best_delta);
6908 best_delta = act_delta;
6909 }
6910 else
6911 iv_ca_delta_free (&act_delta);
6912 }
6913
6914 if (!best_delta)
6915 {
6916 *delta = NULL;
6917 return best_cost;
6918 }
6919
6920 /* Recurse to possibly remove other unnecessary ivs. */
6921 iv_ca_delta_commit (data, ivs, best_delta, true);
6922 best_cost = iv_ca_prune (data, ivs, except_cand, delta);
6923 iv_ca_delta_commit (data, ivs, best_delta, false);
6924 *delta = iv_ca_delta_join (best_delta, *delta);
6925 return best_cost;
6926 }
6927
6928 /* Check if CAND_IDX is a candidate other than OLD_CAND and has a
6929 cheaper local cost for GROUP than BEST_CP. If so, return a pointer to
6930 the corresponding cost_pair; otherwise just return BEST_CP. */
6931
6932 static class cost_pair*
6933 cheaper_cost_with_cand (struct ivopts_data *data, struct iv_group *group,
6934 unsigned int cand_idx, struct iv_cand *old_cand,
6935 class cost_pair *best_cp)
6936 {
6937 struct iv_cand *cand;
6938 class cost_pair *cp;
6939
6940 gcc_assert (old_cand != NULL && best_cp != NULL);
6941 if (cand_idx == old_cand->id)
6942 return best_cp;
6943
6944 cand = data->vcands[cand_idx];
6945 cp = get_group_iv_cost (data, group, cand);
6946 if (cp != NULL && cheaper_cost_pair (cp, best_cp))
6947 return cp;
6948
6949 return best_cp;
6950 }
6951
6952 /* Try breaking the locally optimal fixed-point for IVS by replacing candidates
6953 which are used by more than one iv use. For each of those candidates,
6954 this function tries to represent the iv uses under that candidate using
6955 other ones with lower local cost, then tries to prune the new set.
6956 If the new set has a lower cost, it returns the new cost after recording
6957 the candidate replacement in list DELTA. */
6958
6959 static comp_cost
6960 iv_ca_replace (struct ivopts_data *data, class iv_ca *ivs,
6961 struct iv_ca_delta **delta)
6962 {
6963 bitmap_iterator bi, bj;
6964 unsigned int i, j, k;
6965 struct iv_cand *cand;
6966 comp_cost orig_cost, acost;
6967 struct iv_ca_delta *act_delta, *tmp_delta;
6968 class cost_pair *old_cp, *best_cp = NULL;
6969
6970 *delta = NULL;
6971 orig_cost = iv_ca_cost (ivs);
6972
6973 EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
6974 {
6975 if (ivs->n_cand_uses[i] == 1
6976 || ivs->n_cand_uses[i] > ALWAYS_PRUNE_CAND_SET_BOUND)
6977 continue;
6978
6979 cand = data->vcands[i];
6980
6981 act_delta = NULL;
6982 /* Represent uses under current candidate using other ones with
6983 lower local cost. */
6984 for (j = 0; j < ivs->upto; j++)
6985 {
6986 struct iv_group *group = data->vgroups[j];
6987 old_cp = iv_ca_cand_for_group (ivs, group);
6988
6989 if (old_cp->cand != cand)
6990 continue;
6991
6992 best_cp = old_cp;
6993 if (data->consider_all_candidates)
6994 for (k = 0; k < data->vcands.length (); k++)
6995 best_cp = cheaper_cost_with_cand (data, group, k,
6996 old_cp->cand, best_cp);
6997 else
6998 EXECUTE_IF_SET_IN_BITMAP (group->related_cands, 0, k, bj)
6999 best_cp = cheaper_cost_with_cand (data, group, k,
7000 old_cp->cand, best_cp);
7001
7002 if (best_cp == old_cp)
7003 continue;
7004
7005 act_delta = iv_ca_delta_add (group, old_cp, best_cp, act_delta);
7006 }
7007 /* No need for further pruning. */
7008 if (!act_delta)
7009 continue;
7010
7011 /* Prune the new candidate set. */
7012 iv_ca_delta_commit (data, ivs, act_delta, true);
7013 acost = iv_ca_prune (data, ivs, NULL, &tmp_delta);
7014 iv_ca_delta_commit (data, ivs, act_delta, false);
7015 act_delta = iv_ca_delta_join (act_delta, tmp_delta);
7016
7017 if (acost < orig_cost)
7018 {
7019 *delta = act_delta;
7020 return acost;
7021 }
7022 else
7023 iv_ca_delta_free (&act_delta);
7024 }
7025
7026 return orig_cost;
7027 }
7028
7029 /* Tries to extend the set IVS in the best possible way in order to
7030 express GROUP. If ORIGINALP is true, prefer candidates from
7031 the original set of IVs, otherwise favor important candidates not
7032 based on any memory object. */
7033
7034 static bool
7035 try_add_cand_for (struct ivopts_data *data, class iv_ca *ivs,
7036 struct iv_group *group, bool originalp)
7037 {
7038 comp_cost best_cost, act_cost;
7039 unsigned i;
7040 bitmap_iterator bi;
7041 struct iv_cand *cand;
7042 struct iv_ca_delta *best_delta = NULL, *act_delta;
7043 class cost_pair *cp;
7044
7045 iv_ca_add_group (data, ivs, group);
7046 best_cost = iv_ca_cost (ivs);
7047 cp = iv_ca_cand_for_group (ivs, group);
7048 if (cp)
7049 {
7050 best_delta = iv_ca_delta_add (group, NULL, cp, NULL);
7051 iv_ca_set_no_cp (data, ivs, group);
7052 }
7053
7054 /* If ORIGINALP is true, try to find the original IV for the use. Otherwise
7055 first try important candidates not based on any memory object. Only if
7056 this fails, try the specific ones. Rationale -- in loops with many
7057 variables the best choice often is to use just one generic biv. If we
7058 added here many ivs specific to the uses, the optimization algorithm later
7059 would be likely to get stuck in a local minimum, thus causing us to create
7060 too many ivs. The approach from few ivs to more seems more likely to be
7061 successful -- starting from few ivs, replacing an expensive use by a
7062 specific iv should always be a win. */
7063 EXECUTE_IF_SET_IN_BITMAP (group->related_cands, 0, i, bi)
7064 {
7065 cand = data->vcands[i];
7066
7067 if (originalp && cand->pos != IP_ORIGINAL)
7068 continue;
7069
7070 if (!originalp && cand->iv->base_object != NULL_TREE)
7071 continue;
7072
7073 if (iv_ca_cand_used_p (ivs, cand))
7074 continue;
7075
7076 cp = get_group_iv_cost (data, group, cand);
7077 if (!cp)
7078 continue;
7079
7080 iv_ca_set_cp (data, ivs, group, cp);
7081 act_cost = iv_ca_extend (data, ivs, cand, &act_delta, NULL,
7082 true);
7083 iv_ca_set_no_cp (data, ivs, group);
7084 act_delta = iv_ca_delta_add (group, NULL, cp, act_delta);
7085
7086 if (act_cost < best_cost)
7087 {
7088 best_cost = act_cost;
7089
7090 iv_ca_delta_free (&best_delta);
7091 best_delta = act_delta;
7092 }
7093 else
7094 iv_ca_delta_free (&act_delta);
7095 }
7096
7097 if (best_cost.infinite_cost_p ())
7098 {
7099 for (i = 0; i < group->n_map_members; i++)
7100 {
7101 cp = group->cost_map + i;
7102 cand = cp->cand;
7103 if (!cand)
7104 continue;
7105
7106 /* Already tried this. */
7107 if (cand->important)
7108 {
7109 if (originalp && cand->pos == IP_ORIGINAL)
7110 continue;
7111 if (!originalp && cand->iv->base_object == NULL_TREE)
7112 continue;
7113 }
7114
7115 if (iv_ca_cand_used_p (ivs, cand))
7116 continue;
7117
7118 act_delta = NULL;
7119 iv_ca_set_cp (data, ivs, group, cp);
7120 act_cost = iv_ca_extend (data, ivs, cand, &act_delta, NULL, true);
7121 iv_ca_set_no_cp (data, ivs, group);
7122 act_delta = iv_ca_delta_add (group,
7123 iv_ca_cand_for_group (ivs, group),
7124 cp, act_delta);
7125
7126 if (act_cost < best_cost)
7127 {
7128 best_cost = act_cost;
7129
7130 if (best_delta)
7131 iv_ca_delta_free (&best_delta);
7132 best_delta = act_delta;
7133 }
7134 else
7135 iv_ca_delta_free (&act_delta);
7136 }
7137 }
7138
7139 iv_ca_delta_commit (data, ivs, best_delta, true);
7140 iv_ca_delta_free (&best_delta);
7141
7142 return !best_cost.infinite_cost_p ();
7143 }
7144
7145 /* Finds an initial assignment of candidates to uses. */
7146
7147 static class iv_ca *
7148 get_initial_solution (struct ivopts_data *data, bool originalp)
7149 {
7150 unsigned i;
7151 class iv_ca *ivs = iv_ca_new (data);
7152
7153 for (i = 0; i < data->vgroups.length (); i++)
7154 if (!try_add_cand_for (data, ivs, data->vgroups[i], originalp))
7155 {
7156 iv_ca_free (&ivs);
7157 return NULL;
7158 }
7159
7160 return ivs;
7161 }
7162
7163 /* Tries to improve the set of induction variables IVS. TRY_REPLACE_P
7164 points to a bool variable; if it is true, this function tries to break
7165 the locally optimal fixed-point by replacing candidates in IVS. */
7166
7167 static bool
7168 try_improve_iv_set (struct ivopts_data *data,
7169 class iv_ca *ivs, bool *try_replace_p)
7170 {
7171 unsigned i, n_ivs;
7172 comp_cost acost, best_cost = iv_ca_cost (ivs);
7173 struct iv_ca_delta *best_delta = NULL, *act_delta, *tmp_delta;
7174 struct iv_cand *cand;
7175
7176 /* Try extending the set of induction variables by one. */
7177 for (i = 0; i < data->vcands.length (); i++)
7178 {
7179 cand = data->vcands[i];
7180
7181 if (iv_ca_cand_used_p (ivs, cand))
7182 continue;
7183
7184 acost = iv_ca_extend (data, ivs, cand, &act_delta, &n_ivs, false);
7185 if (!act_delta)
7186 continue;
7187
7188 /* If we successfully added the candidate and the set is small enough,
7189 try optimizing it by removing other candidates. */
7190 if (n_ivs <= ALWAYS_PRUNE_CAND_SET_BOUND)
7191 {
7192 iv_ca_delta_commit (data, ivs, act_delta, true);
7193 acost = iv_ca_prune (data, ivs, cand, &tmp_delta);
7194 iv_ca_delta_commit (data, ivs, act_delta, false);
7195 act_delta = iv_ca_delta_join (act_delta, tmp_delta);
7196 }
7197
7198 if (acost < best_cost)
7199 {
7200 best_cost = acost;
7201 iv_ca_delta_free (&best_delta);
7202 best_delta = act_delta;
7203 }
7204 else
7205 iv_ca_delta_free (&act_delta);
7206 }
7207
7208 if (!best_delta)
7209 {
7210 /* Try removing the candidates from the set instead. */
7211 best_cost = iv_ca_prune (data, ivs, NULL, &best_delta);
7212
7213 if (!best_delta && *try_replace_p)
7214 {
7215 *try_replace_p = false;
7216 /* So far the candidate-selecting algorithm tends to choose fewer IVs
7217 so that it can handle cases in which loops have many variables
7218 but the best choice is often to use only one general biv. One
7219 weakness is that it can't handle the opposite case, in which different
7220 candidates should be chosen with respect to each use. To solve
7221 the problem, we replace candidates in the manner described in the
7222 comments of iv_ca_replace, thus giving the general algorithm a chance
7223 to break the locally optimal fixed-point in these cases. */
7224 best_cost = iv_ca_replace (data, ivs, &best_delta);
7225 }
7226
7227 if (!best_delta)
7228 return false;
7229 }
7230
7231 iv_ca_delta_commit (data, ivs, best_delta, true);
7232 iv_ca_delta_free (&best_delta);
7233 return best_cost == iv_ca_cost (ivs);
7234 }
7235
7236 /* Attempts to find the optimal set of induction variables. We do a simple
7237 greedy heuristic -- we try to replace at most one candidate in the selected
7238 solution and remove the unused ivs while this improves the cost. */
7239
7240 static class iv_ca *
7241 find_optimal_iv_set_1 (struct ivopts_data *data, bool originalp)
7242 {
7243 class iv_ca *set;
7244 bool try_replace_p = true;
7245
7246 /* Get the initial solution. */
7247 set = get_initial_solution (data, originalp);
7248 if (!set)
7249 {
7250 if (dump_file && (dump_flags & TDF_DETAILS))
7251 fprintf (dump_file, "Unable to substitute for ivs, failed.\n");
7252 return NULL;
7253 }
7254
7255 if (dump_file && (dump_flags & TDF_DETAILS))
7256 {
7257 fprintf (dump_file, "Initial set of candidates:\n");
7258 iv_ca_dump (data, dump_file, set);
7259 }
7260
7261 while (try_improve_iv_set (data, set, &try_replace_p))
7262 {
7263 if (dump_file && (dump_flags & TDF_DETAILS))
7264 {
7265 fprintf (dump_file, "Improved to:\n");
7266 iv_ca_dump (data, dump_file, set);
7267 }
7268 }
7269
7270 /* If the set has infinite_cost, it can't be optimal. */
7271 if (iv_ca_cost (set).infinite_cost_p ())
7272 {
7273 if (dump_file && (dump_flags & TDF_DETAILS))
7274 fprintf (dump_file,
7275 "Overflow to infinite cost in try_improve_iv_set.\n");
7276 iv_ca_free (&set);
7277 }
7278 return set;
7279 }
7280
7281 static class iv_ca *
7282 find_optimal_iv_set (struct ivopts_data *data)
7283 {
7284 unsigned i;
7285 comp_cost cost, origcost;
7286 class iv_ca *set, *origset;
7287
7288 /* Determine the cost based on a strategy that starts with original IVs,
7289 and try again using a strategy that prefers candidates not based
7290 on any memory object. */
7291 origset = find_optimal_iv_set_1 (data, true);
7292 set = find_optimal_iv_set_1 (data, false);
7293
7294 if (!origset && !set)
7295 return NULL;
7296
7297 origcost = origset ? iv_ca_cost (origset) : infinite_cost;
7298 cost = set ? iv_ca_cost (set) : infinite_cost;
7299
7300 if (dump_file && (dump_flags & TDF_DETAILS))
7301 {
7302 fprintf (dump_file, "Original cost %" PRId64 " (complexity %d)\n\n",
7303 origcost.cost, origcost.complexity);
7304 fprintf (dump_file, "Final cost %" PRId64 " (complexity %d)\n\n",
7305 cost.cost, cost.complexity);
7306 }
7307
7308 /* Choose the one with the best cost. */
7309 if (origcost <= cost)
7310 {
7311 if (set)
7312 iv_ca_free (&set);
7313 set = origset;
7314 }
7315 else if (origset)
7316 iv_ca_free (&origset);
7317
7318 for (i = 0; i < data->vgroups.length (); i++)
7319 {
7320 struct iv_group *group = data->vgroups[i];
7321 group->selected = iv_ca_cand_for_group (set, group)->cand;
7322 }
7323
7324 return set;
7325 }
7326
7327 /* Creates a new induction variable corresponding to CAND. */
7328
7329 static void
7330 create_new_iv (struct ivopts_data *data, struct iv_cand *cand)
7331 {
7332 gimple_stmt_iterator incr_pos;
7333 tree base;
7334 struct iv_use *use;
7335 struct iv_group *group;
7336 bool after = false;
7337
7338 gcc_assert (cand->iv != NULL);
7339
7340 switch (cand->pos)
7341 {
7342 case IP_NORMAL:
7343 incr_pos = gsi_last_bb (ip_normal_pos (data->current_loop));
7344 break;
7345
7346 case IP_END:
7347 incr_pos = gsi_last_bb (ip_end_pos (data->current_loop));
7348 after = true;
7349 break;
7350
7351 case IP_AFTER_USE:
7352 after = true;
7353 /* fall through */
7354 case IP_BEFORE_USE:
7355 incr_pos = gsi_for_stmt (cand->incremented_at);
7356 break;
7357
7358 case IP_ORIGINAL:
7359 /* Mark that the iv is preserved. */
7360 name_info (data, cand->var_before)->preserve_biv = true;
7361 name_info (data, cand->var_after)->preserve_biv = true;
7362
7363 /* Rewrite the increment so that it uses var_before directly. */
7364 use = find_interesting_uses_op (data, cand->var_after);
7365 group = data->vgroups[use->group_id];
7366 group->selected = cand;
7367 return;
7368 }
7369
7370 gimple_add_tmp_var (cand->var_before);
7371
7372 base = unshare_expr (cand->iv->base);
7373
7374 create_iv (base, unshare_expr (cand->iv->step),
7375 cand->var_before, data->current_loop,
7376 &incr_pos, after, &cand->var_before, &cand->var_after);
7377 }
7378
7379 /* Creates new induction variables described in SET. */
7380
7381 static void
7382 create_new_ivs (struct ivopts_data *data, class iv_ca *set)
7383 {
7384 unsigned i;
7385 struct iv_cand *cand;
7386 bitmap_iterator bi;
7387
7388 EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
7389 {
7390 cand = data->vcands[i];
7391 create_new_iv (data, cand);
7392 }
7393
7394 if (dump_file && (dump_flags & TDF_DETAILS))
7395 {
7396 fprintf (dump_file, "Selected IV set for loop %d",
7397 data->current_loop->num);
7398 if (data->loop_loc != UNKNOWN_LOCATION)
7399 fprintf (dump_file, " at %s:%d", LOCATION_FILE (data->loop_loc),
7400 LOCATION_LINE (data->loop_loc));
7401 fprintf (dump_file, ", " HOST_WIDE_INT_PRINT_DEC " avg niters",
7402 avg_loop_niter (data->current_loop));
7403 fprintf (dump_file, ", %lu IVs:\n", bitmap_count_bits (set->cands));
7404 EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
7405 {
7406 cand = data->vcands[i];
7407 dump_cand (dump_file, cand);
7408 }
7409 fprintf (dump_file, "\n");
7410 }
7411 }
7412
7413 /* Rewrites USE (definition of iv used in a nonlinear expression)
7414 using candidate CAND. */
7415
7416 static void
7417 rewrite_use_nonlinear_expr (struct ivopts_data *data,
7418 struct iv_use *use, struct iv_cand *cand)
7419 {
7420 gassign *ass;
7421 gimple_stmt_iterator bsi;
7422 tree comp, type = get_use_type (use), tgt;
7423
7424 /* An important special case -- if we are asked to express the value of
7425 the original iv by itself, just exit; there is no need to
7426 introduce a new computation (that might also need casting the
7427 variable to unsigned and back). */
7428 if (cand->pos == IP_ORIGINAL
7429 && cand->incremented_at == use->stmt)
7430 {
7431 tree op = NULL_TREE;
7432 enum tree_code stmt_code;
7433
7434 gcc_assert (is_gimple_assign (use->stmt));
7435 gcc_assert (gimple_assign_lhs (use->stmt) == cand->var_after);
7436
7437 /* Check whether we may leave the computation unchanged.
7438 This is the case only if it does not rely on other
7439 computations in the loop -- otherwise, the computation
7440 we rely upon may be removed in remove_unused_ivs,
7441 thus leading to ICE. */
7442 stmt_code = gimple_assign_rhs_code (use->stmt);
7443 if (stmt_code == PLUS_EXPR
7444 || stmt_code == MINUS_EXPR
7445 || stmt_code == POINTER_PLUS_EXPR)
7446 {
7447 if (gimple_assign_rhs1 (use->stmt) == cand->var_before)
7448 op = gimple_assign_rhs2 (use->stmt);
7449 else if (gimple_assign_rhs2 (use->stmt) == cand->var_before)
7450 op = gimple_assign_rhs1 (use->stmt);
7451 }
7452
7453 if (op != NULL_TREE)
7454 {
7455 if (expr_invariant_in_loop_p (data->current_loop, op))
7456 return;
7457 if (TREE_CODE (op) == SSA_NAME)
7458 {
7459 struct iv *iv = get_iv (data, op);
7460 if (iv != NULL && integer_zerop (iv->step))
7461 return;
7462 }
7463 }
7464 }
7465
7466 switch (gimple_code (use->stmt))
7467 {
7468 case GIMPLE_PHI:
7469 tgt = PHI_RESULT (use->stmt);
7470
7471 /* If we should keep the biv, do not replace it. */
7472 if (name_info (data, tgt)->preserve_biv)
7473 return;
7474
7475 bsi = gsi_after_labels (gimple_bb (use->stmt));
7476 break;
7477
7478 case GIMPLE_ASSIGN:
7479 tgt = gimple_assign_lhs (use->stmt);
7480 bsi = gsi_for_stmt (use->stmt);
7481 break;
7482
7483 default:
7484 gcc_unreachable ();
7485 }
7486
7487 aff_tree aff_inv, aff_var;
7488 if (!get_computation_aff_1 (data->current_loop, use->stmt,
7489 use, cand, &aff_inv, &aff_var))
7490 gcc_unreachable ();
7491
7492 unshare_aff_combination (&aff_inv);
7493 unshare_aff_combination (&aff_var);
7494 /* Prefer the CSE opportunity over hoisting a loop invariant by adding the
7495 offset last, so that iv_uses with different offsets can be CSEd. */
7496 poly_widest_int offset = aff_inv.offset;
7497 aff_inv.offset = 0;
7498
7499 gimple_seq stmt_list = NULL, seq = NULL;
7500 tree comp_op1 = aff_combination_to_tree (&aff_inv);
7501 tree comp_op2 = aff_combination_to_tree (&aff_var);
7502 gcc_assert (comp_op1 && comp_op2);
7503
7504 comp_op1 = force_gimple_operand (comp_op1, &seq, true, NULL);
7505 gimple_seq_add_seq (&stmt_list, seq);
7506 comp_op2 = force_gimple_operand (comp_op2, &seq, true, NULL);
7507 gimple_seq_add_seq (&stmt_list, seq);
7508
7509 if (POINTER_TYPE_P (TREE_TYPE (comp_op2)))
7510 std::swap (comp_op1, comp_op2);
7511
7512 if (POINTER_TYPE_P (TREE_TYPE (comp_op1)))
7513 {
7514 comp = fold_build_pointer_plus (comp_op1,
7515 fold_convert (sizetype, comp_op2));
7516 comp = fold_build_pointer_plus (comp,
7517 wide_int_to_tree (sizetype, offset));
7518 }
7519 else
7520 {
7521 comp = fold_build2 (PLUS_EXPR, TREE_TYPE (comp_op1), comp_op1,
7522 fold_convert (TREE_TYPE (comp_op1), comp_op2));
7523 comp = fold_build2 (PLUS_EXPR, TREE_TYPE (comp_op1), comp,
7524 wide_int_to_tree (TREE_TYPE (comp_op1), offset));
7525 }
7526
7527 comp = fold_convert (type, comp);
7528 comp = force_gimple_operand (comp, &seq, false, NULL);
7529 gimple_seq_add_seq (&stmt_list, seq);
7530 if (gimple_code (use->stmt) != GIMPLE_PHI
7531 /* We can't allow the stmt to be reallocated, as it may still be
7532 pointed to. */
7533 && (get_gimple_rhs_num_ops (TREE_CODE (comp))
7534 >= gimple_num_ops (gsi_stmt (bsi))))
7535 {
7536 comp = force_gimple_operand (comp, &seq, true, NULL);
7537 gimple_seq_add_seq (&stmt_list, seq);
7538 if (POINTER_TYPE_P (TREE_TYPE (tgt)))
7539 {
7540 duplicate_ssa_name_ptr_info (comp, SSA_NAME_PTR_INFO (tgt));
7541 /* As this isn't a plain copy we have to reset alignment
7542 information. */
7543 if (SSA_NAME_PTR_INFO (comp))
7544 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (comp));
7545 }
7546 }
7547
7548 gsi_insert_seq_before (&bsi, stmt_list, GSI_SAME_STMT);
7549 if (gimple_code (use->stmt) == GIMPLE_PHI)
7550 {
7551 ass = gimple_build_assign (tgt, comp);
7552 gsi_insert_before (&bsi, ass, GSI_SAME_STMT);
7553
7554 bsi = gsi_for_stmt (use->stmt);
7555 remove_phi_node (&bsi, false);
7556 }
7557 else
7558 {
7559 gimple_assign_set_rhs_from_tree (&bsi, comp);
7560 use->stmt = gsi_stmt (bsi);
7561 }
7562 }
7563
7564 /* Performs a peephole optimization to reorder the iv update statement with
7565 a mem ref to enable instruction combining in later phases. The mem ref uses
7566 the iv value before the update, so the reordering transformation requires
7567 adjustment of the offset. CAND is the selected IV_CAND.
7568
7569 Example:
7570
7571 t = MEM_REF (base, iv1, 8, 16); // base, index, stride, offset
7572 iv2 = iv1 + 1;
7573
7574 if (t < val) (1)
7575 goto L;
7576 goto Head;
7577
7578
7579 Directly propagating t over to (1) would introduce an overlapping live range
7580 and thus increase register pressure. This peephole transforms it into:
7581
7582
7583 iv2 = iv1 + 1;
7584 t = MEM_REF (base, iv2, 8, 8);
7585 if (t < val)
7586 goto L;
7587 goto Head;
7588 */
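/* In the example above the constant offset shrinks by one stride: moving the
   use after the increment advances the index by 1, i.e. by 8 bytes, so the
   offset becomes 16 - 8 = 8 to keep the address unchanged.  This is
   illustrative arithmetic only; the actual adjustment falls out of rewriting
   the address use with the updated candidate value.  */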
7589
7590 static void
7591 adjust_iv_update_pos (struct iv_cand *cand, struct iv_use *use)
7592 {
7593 tree var_after;
7594 gimple *iv_update, *stmt;
7595 basic_block bb;
7596 gimple_stmt_iterator gsi, gsi_iv;
7597
7598 if (cand->pos != IP_NORMAL)
7599 return;
7600
7601 var_after = cand->var_after;
7602 iv_update = SSA_NAME_DEF_STMT (var_after);
7603
7604 bb = gimple_bb (iv_update);
7605 gsi = gsi_last_nondebug_bb (bb);
7606 stmt = gsi_stmt (gsi);
7607
7608 /* Only handle conditional statements for now. */
7609 if (gimple_code (stmt) != GIMPLE_COND)
7610 return;
7611
7612 gsi_prev_nondebug (&gsi);
7613 stmt = gsi_stmt (gsi);
7614 if (stmt != iv_update)
7615 return;
7616
7617 gsi_prev_nondebug (&gsi);
7618 if (gsi_end_p (gsi))
7619 return;
7620
7621 stmt = gsi_stmt (gsi);
7622 if (gimple_code (stmt) != GIMPLE_ASSIGN)
7623 return;
7624
7625 if (stmt != use->stmt)
7626 return;
7627
7628 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
7629 return;
7630
7631 if (dump_file && (dump_flags & TDF_DETAILS))
7632 {
7633 fprintf (dump_file, "Reordering \n");
7634 print_gimple_stmt (dump_file, iv_update, 0);
7635 print_gimple_stmt (dump_file, use->stmt, 0);
7636 fprintf (dump_file, "\n");
7637 }
7638
7639 gsi = gsi_for_stmt (use->stmt);
7640 gsi_iv = gsi_for_stmt (iv_update);
7641 gsi_move_before (&gsi_iv, &gsi);
7642
7643 cand->pos = IP_BEFORE_USE;
7644 cand->incremented_at = use->stmt;
7645 }
7646
7647 /* Return the alias pointer type that should be used for a MEM_REF
7648 associated with USE, which has type USE_PTR_ADDRESS. */
7649
7650 static tree
7651 get_alias_ptr_type_for_ptr_address (iv_use *use)
7652 {
7653 gcall *call = as_a <gcall *> (use->stmt);
7654 switch (gimple_call_internal_fn (call))
7655 {
7656 case IFN_MASK_LOAD:
7657 case IFN_MASK_STORE:
7658 case IFN_MASK_LOAD_LANES:
7659 case IFN_MASK_STORE_LANES:
7660 case IFN_LEN_LOAD:
7661 case IFN_LEN_STORE:
7662 /* The second argument contains the correct alias type. */
7663 gcc_assert (use->op_p == gimple_call_arg_ptr (call, 0));
7664 return TREE_TYPE (gimple_call_arg (call, 1));
7665
7666 default:
7667 gcc_unreachable ();
7668 }
7669 }
7670
7671
7672 /* Rewrites USE (address that is an iv) using candidate CAND. */
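/* Illustrative example (hypothetical names, dump syntax approximate): an
   access a[i] with 4-byte elements whose selected candidate is an unsigned
   iv counting in bytes might end up as something like

     MEM[(int *)&a + ivtmp_5];

   The exact TARGET_MEM_REF/MEM_REF form is chosen by create_mem_ref based
   on the target's addressing modes.  */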
7673
7674 static void
7675 rewrite_use_address (struct ivopts_data *data,
7676 struct iv_use *use, struct iv_cand *cand)
7677 {
7678 aff_tree aff;
7679 bool ok;
7680
7681 adjust_iv_update_pos (cand, use);
7682 ok = get_computation_aff (data->current_loop, use->stmt, use, cand, &aff);
7683 gcc_assert (ok);
7684 unshare_aff_combination (&aff);
7685
7686 /* To avoid undefined overflow problems, all IV candidates use unsigned
7687 integer types. The drawback is that this makes it impossible for
7688 create_mem_ref to distinguish an IV that is based on a memory object
7689 from one that represents simply an offset.
7690
7691 To work around this problem, we pass a hint to create_mem_ref that
7692 indicates which variable (if any) in aff is an IV based on a memory
7693 object. Note that we only consider the candidate. If this is not
7694 based on an object, the base of the reference is in some subexpression
7695 of the use -- but these will use pointer types, so they are recognized
7696 by the create_mem_ref heuristics anyway. */
7697 tree iv = var_at_stmt (data->current_loop, cand, use->stmt);
7698 tree base_hint = (cand->iv->base_object) ? iv : NULL_TREE;
7699 gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
7700 tree type = use->mem_type;
7701 tree alias_ptr_type;
7702 if (use->type == USE_PTR_ADDRESS)
7703 alias_ptr_type = get_alias_ptr_type_for_ptr_address (use);
7704 else
7705 {
7706 gcc_assert (type == TREE_TYPE (*use->op_p));
7707 unsigned int align = get_object_alignment (*use->op_p);
7708 if (align != TYPE_ALIGN (type))
7709 type = build_aligned_type (type, align);
7710 alias_ptr_type = reference_alias_ptr_type (*use->op_p);
7711 }
7712 tree ref = create_mem_ref (&bsi, type, &aff, alias_ptr_type,
7713 iv, base_hint, data->speed);
7714
7715 if (use->type == USE_PTR_ADDRESS)
7716 {
7717 ref = fold_build1 (ADDR_EXPR, build_pointer_type (use->mem_type), ref);
7718 ref = fold_convert (get_use_type (use), ref);
7719 ref = force_gimple_operand_gsi (&bsi, ref, true, NULL_TREE,
7720 true, GSI_SAME_STMT);
7721 }
7722 else
7723 copy_ref_info (ref, *use->op_p);
7724
7725 *use->op_p = ref;
7726 }
7727
7728 /* Rewrites USE (the condition such that one of the arguments is an iv) using
7729 candidate CAND. */
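/* Illustrative example (hypothetical names): when iv elimination succeeds,
   an exit test such as

     if (i_4 < 1000)

   is replaced by a comparison of the candidate against a bound computed
   once in the loop preheader, e.g.

     if (ivtmp_9 != _bound_11)

   Otherwise the original iv value is recomputed from the candidate and used
   as the operand of the unchanged comparison.  */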
7730
7731 static void
7732 rewrite_use_compare (struct ivopts_data *data,
7733 struct iv_use *use, struct iv_cand *cand)
7734 {
7735 tree comp, op, bound;
7736 gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
7737 enum tree_code compare;
7738 struct iv_group *group = data->vgroups[use->group_id];
7739 class cost_pair *cp = get_group_iv_cost (data, group, cand);
7740
7741 bound = cp->value;
7742 if (bound)
7743 {
7744 tree var = var_at_stmt (data->current_loop, cand, use->stmt);
7745 tree var_type = TREE_TYPE (var);
7746 gimple_seq stmts;
7747
7748 if (dump_file && (dump_flags & TDF_DETAILS))
7749 {
7750 fprintf (dump_file, "Replacing exit test: ");
7751 print_gimple_stmt (dump_file, use->stmt, 0, TDF_SLIM);
7752 }
7753 compare = cp->comp;
7754 bound = unshare_expr (fold_convert (var_type, bound));
7755 op = force_gimple_operand (bound, &stmts, true, NULL_TREE);
7756 if (stmts)
7757 gsi_insert_seq_on_edge_immediate (
7758 loop_preheader_edge (data->current_loop),
7759 stmts);
7760
7761 gcond *cond_stmt = as_a <gcond *> (use->stmt);
7762 gimple_cond_set_lhs (cond_stmt, var);
7763 gimple_cond_set_code (cond_stmt, compare);
7764 gimple_cond_set_rhs (cond_stmt, op);
7765 return;
7766 }
7767
7768 /* The induction variable elimination failed; just express the original
7769 giv. */
7770 comp = get_computation_at (data->current_loop, use->stmt, use, cand);
7771 gcc_assert (comp != NULL_TREE);
7772 gcc_assert (use->op_p != NULL);
7773 *use->op_p = force_gimple_operand_gsi (&bsi, comp, true,
7774 SSA_NAME_VAR (*use->op_p),
7775 true, GSI_SAME_STMT);
7776 }
7777
7778 /* Rewrite the groups using the selected induction variables. */
7779
7780 static void
7781 rewrite_groups (struct ivopts_data *data)
7782 {
7783 unsigned i, j;
7784
7785 for (i = 0; i < data->vgroups.length (); i++)
7786 {
7787 struct iv_group *group = data->vgroups[i];
7788 struct iv_cand *cand = group->selected;
7789
7790 gcc_assert (cand);
7791
7792 if (group->type == USE_NONLINEAR_EXPR)
7793 {
7794 for (j = 0; j < group->vuses.length (); j++)
7795 {
7796 rewrite_use_nonlinear_expr (data, group->vuses[j], cand);
7797 update_stmt (group->vuses[j]->stmt);
7798 }
7799 }
7800 else if (address_p (group->type))
7801 {
7802 for (j = 0; j < group->vuses.length (); j++)
7803 {
7804 rewrite_use_address (data, group->vuses[j], cand);
7805 update_stmt (group->vuses[j]->stmt);
7806 }
7807 }
7808 else
7809 {
7810 gcc_assert (group->type == USE_COMPARE);
7811
7812 for (j = 0; j < group->vuses.length (); j++)
7813 {
7814 rewrite_use_compare (data, group->vuses[j], cand);
7815 update_stmt (group->vuses[j]->stmt);
7816 }
7817 }
7818 }
7819 }
7820
7821 /* Removes the ivs that are not used after rewriting. */
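/* Illustrative example (hypothetical names): when a removed iv was
   referenced from debug bind statements, its value is re-expressed in terms
   of a surviving candidate, e.g.

     # DEBUG i => i_3

   becomes

     # DEBUG i => ivtmp_7 + 10

   possibly through an intermediate debug temporary when the name is used in
   several binds or inside a larger expression.  */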
7822
7823 static void
7824 remove_unused_ivs (struct ivopts_data *data, bitmap toremove)
7825 {
7826 unsigned j;
7827 bitmap_iterator bi;
7828
7829 /* Figure out an order in which to release SSA DEFs so that we don't
7830 release something that we'd have to propagate into a debug stmt
7831 afterwards. */
7832 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, j, bi)
7833 {
7834 struct version_info *info;
7835
7836 info = ver_info (data, j);
7837 if (info->iv
7838 && !integer_zerop (info->iv->step)
7839 && !info->inv_id
7840 && !info->iv->nonlin_use
7841 && !info->preserve_biv)
7842 {
7843 bitmap_set_bit (toremove, SSA_NAME_VERSION (info->iv->ssa_name));
7844
7845 tree def = info->iv->ssa_name;
7846
7847 if (MAY_HAVE_DEBUG_BIND_STMTS && SSA_NAME_DEF_STMT (def))
7848 {
7849 imm_use_iterator imm_iter;
7850 use_operand_p use_p;
7851 gimple *stmt;
7852 int count = 0;
7853
7854 FOR_EACH_IMM_USE_STMT (stmt, imm_iter, def)
7855 {
7856 if (!gimple_debug_bind_p (stmt))
7857 continue;
7858
7859 /* We just want to determine whether to do nothing
7860 (count == 0), to substitute the computed
7861 expression into a single use of the SSA DEF by
7862 itself (count == 1), or to use a debug temp
7863 because the SSA DEF is used multiple times or as
7864 part of a larger expression (count > 1). */
7865 count++;
7866 if (gimple_debug_bind_get_value (stmt) != def)
7867 count++;
7868
7869 if (count > 1)
7870 break;
7871 }
7872
7873 if (!count)
7874 continue;
7875
7876 struct iv_use dummy_use;
7877 struct iv_cand *best_cand = NULL, *cand;
7878 unsigned i, best_pref = 0, cand_pref;
7879 tree comp = NULL_TREE;
7880
7881 memset (&dummy_use, 0, sizeof (dummy_use));
7882 dummy_use.iv = info->iv;
7883 for (i = 0; i < data->vgroups.length () && i < 64; i++)
7884 {
7885 cand = data->vgroups[i]->selected;
7886 if (cand == best_cand)
7887 continue;
7888 cand_pref = operand_equal_p (cand->iv->step,
7889 info->iv->step, 0)
7890 ? 4 : 0;
7891 cand_pref
7892 += TYPE_MODE (TREE_TYPE (cand->iv->base))
7893 == TYPE_MODE (TREE_TYPE (info->iv->base))
7894 ? 2 : 0;
7895 cand_pref
7896 += TREE_CODE (cand->iv->base) == INTEGER_CST
7897 ? 1 : 0;
7898 if (best_cand == NULL || best_pref < cand_pref)
7899 {
7900 tree this_comp
7901 = get_debug_computation_at (data->current_loop,
7902 SSA_NAME_DEF_STMT (def),
7903 &dummy_use, cand);
7904 if (this_comp)
7905 {
7906 best_cand = cand;
7907 best_pref = cand_pref;
7908 comp = this_comp;
7909 }
7910 }
7911 }
7912
7913 if (!best_cand)
7914 continue;
7915
7916 comp = unshare_expr (comp);
7917 if (count > 1)
7918 {
7919 tree vexpr = build_debug_expr_decl (TREE_TYPE (comp));
7920 /* FIXME: Is setting the mode really necessary? */
7921 if (SSA_NAME_VAR (def))
7922 SET_DECL_MODE (vexpr, DECL_MODE (SSA_NAME_VAR (def)));
7923 else
7924 SET_DECL_MODE (vexpr, TYPE_MODE (TREE_TYPE (vexpr)));
7925 gdebug *def_temp
7926 = gimple_build_debug_bind (vexpr, comp, NULL);
7927 gimple_stmt_iterator gsi;
7928
7929 if (gimple_code (SSA_NAME_DEF_STMT (def)) == GIMPLE_PHI)
7930 gsi = gsi_after_labels (gimple_bb
7931 (SSA_NAME_DEF_STMT (def)));
7932 else
7933 gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (def));
7934
7935 gsi_insert_before (&gsi, def_temp, GSI_SAME_STMT);
7936 comp = vexpr;
7937 }
7938
7939 FOR_EACH_IMM_USE_STMT (stmt, imm_iter, def)
7940 {
7941 if (!gimple_debug_bind_p (stmt))
7942 continue;
7943
7944 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
7945 SET_USE (use_p, comp);
7946
7947 update_stmt (stmt);
7948 }
7949 }
7950 }
7951 }
7952 }
7953
7954 /* Frees memory occupied by class tree_niter_desc in *VALUE. Callback
7955 for hash_map::traverse. */
7956
7957 bool
7958 free_tree_niter_desc (edge const &, tree_niter_desc *const &value, void *)
7959 {
7960 free (value);
7961 return true;
7962 }
7963
7964 /* Frees data allocated by the optimization of a single loop. */
7965
7966 static void
7967 free_loop_data (struct ivopts_data *data)
7968 {
7969 unsigned i, j;
7970 bitmap_iterator bi;
7971 tree obj;
7972
7973 if (data->niters)
7974 {
7975 data->niters->traverse<void *, free_tree_niter_desc> (NULL);
7976 delete data->niters;
7977 data->niters = NULL;
7978 }
7979
7980 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
7981 {
7982 struct version_info *info;
7983
7984 info = ver_info (data, i);
7985 info->iv = NULL;
7986 info->has_nonlin_use = false;
7987 info->preserve_biv = false;
7988 info->inv_id = 0;
7989 }
7990 bitmap_clear (data->relevant);
7991 bitmap_clear (data->important_candidates);
7992
7993 for (i = 0; i < data->vgroups.length (); i++)
7994 {
7995 struct iv_group *group = data->vgroups[i];
7996
7997 for (j = 0; j < group->vuses.length (); j++)
7998 free (group->vuses[j]);
7999 group->vuses.release ();
8000
8001 BITMAP_FREE (group->related_cands);
8002 for (j = 0; j < group->n_map_members; j++)
8003 {
8004 if (group->cost_map[j].inv_vars)
8005 BITMAP_FREE (group->cost_map[j].inv_vars);
8006 if (group->cost_map[j].inv_exprs)
8007 BITMAP_FREE (group->cost_map[j].inv_exprs);
8008 }
8009
8010 free (group->cost_map);
8011 free (group);
8012 }
8013 data->vgroups.truncate (0);
8014
8015 for (i = 0; i < data->vcands.length (); i++)
8016 {
8017 struct iv_cand *cand = data->vcands[i];
8018
8019 if (cand->inv_vars)
8020 BITMAP_FREE (cand->inv_vars);
8021 if (cand->inv_exprs)
8022 BITMAP_FREE (cand->inv_exprs);
8023 free (cand);
8024 }
8025 data->vcands.truncate (0);
8026
8027 if (data->version_info_size < num_ssa_names)
8028 {
8029 data->version_info_size = 2 * num_ssa_names;
8030 free (data->version_info);
8031 data->version_info = XCNEWVEC (struct version_info, data->version_info_size);
8032 }
8033
8034 data->max_inv_var_id = 0;
8035 data->max_inv_expr_id = 0;
8036
8037 FOR_EACH_VEC_ELT (decl_rtl_to_reset, i, obj)
8038 SET_DECL_RTL (obj, NULL_RTX);
8039
8040 decl_rtl_to_reset.truncate (0);
8041
8042 data->inv_expr_tab->empty ();
8043
8044 data->iv_common_cand_tab->empty ();
8045 data->iv_common_cands.truncate (0);
8046 }
8047
8048 /* Finalizes the data structures used by the iv optimization pass, stored
8049 in DATA. */
8050
8051 static void
8052 tree_ssa_iv_optimize_finalize (struct ivopts_data *data)
8053 {
8054 free_loop_data (data);
8055 free (data->version_info);
8056 BITMAP_FREE (data->relevant);
8057 BITMAP_FREE (data->important_candidates);
8058
8059 decl_rtl_to_reset.release ();
8060 data->vgroups.release ();
8061 data->vcands.release ();
8062 delete data->inv_expr_tab;
8063 data->inv_expr_tab = NULL;
8064 free_affine_expand_cache (&data->name_expansion_cache);
8065 if (data->base_object_map)
8066 delete data->base_object_map;
8067 delete data->iv_common_cand_tab;
8068 data->iv_common_cand_tab = NULL;
8069 data->iv_common_cands.release ();
8070 obstack_free (&data->iv_obstack, NULL);
8071 }
8072
8073 /* Returns true if the loop body BODY includes any function calls. */
8074
8075 static bool
8076 loop_body_includes_call (basic_block *body, unsigned num_nodes)
8077 {
8078 gimple_stmt_iterator gsi;
8079 unsigned i;
8080
8081 for (i = 0; i < num_nodes; i++)
8082 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
8083 {
8084 gimple *stmt = gsi_stmt (gsi);
8085 if (is_gimple_call (stmt)
8086 && !gimple_call_internal_p (stmt)
8087 && !is_inexpensive_builtin (gimple_call_fndecl (stmt)))
8088 return true;
8089 }
8090 return false;
8091 }
8092
8093 /* Determine cost scaling factor for basic blocks in loop. */
8094 #define COST_SCALING_FACTOR_BOUND (20)
8095
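/* Illustrative arithmetic, assuming profile frequencies: if the loop header
   runs at frequency 100 and a block inside the loop at frequency 500, that
   block's statements are scaled by 500 / 100 = 5 (stored in bb->aux below).
   If the hottest block ran at frequency 4000 (ratio 40 > 20), the factor is
   instead 20 * bfreq / 4000, so the scaling is capped at
   COST_SCALING_FACTOR_BOUND.  */
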
8096 static void
8097 determine_scaling_factor (struct ivopts_data *data, basic_block *body)
8098 {
8099 int lfreq = data->current_loop->header->count.to_frequency (cfun);
8100 if (!data->speed || lfreq <= 0)
8101 return;
8102
8103 int max_freq = lfreq;
8104 for (unsigned i = 0; i < data->current_loop->num_nodes; i++)
8105 {
8106 body[i]->aux = (void *)(intptr_t) 1;
8107 if (max_freq < body[i]->count.to_frequency (cfun))
8108 max_freq = body[i]->count.to_frequency (cfun);
8109 }
8110 if (max_freq > lfreq)
8111 {
8112 int divisor, factor;
8113 /* Check if scaling factor itself needs to be scaled by the bound. This
8114 is to avoid overflow when scaling cost according to profile info. */
8115 if (max_freq / lfreq > COST_SCALING_FACTOR_BOUND)
8116 {
8117 divisor = max_freq;
8118 factor = COST_SCALING_FACTOR_BOUND;
8119 }
8120 else
8121 {
8122 divisor = lfreq;
8123 factor = 1;
8124 }
8125 for (unsigned i = 0; i < data->current_loop->num_nodes; i++)
8126 {
8127 int bfreq = body[i]->count.to_frequency (cfun);
8128 if (bfreq <= lfreq)
8129 continue;
8130
8131 body[i]->aux = (void*)(intptr_t) (factor * bfreq / divisor);
8132 }
8133 }
8134 }
8135
8136 /* Find the doloop comparison use and set its doloop_p flag if found. */
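/* Illustrative shape of the loop-closing test this function looks for
   (hypothetical GIMPLE): a single compare group whose statement is

     if (ivtmp_8 != _bound_3)
       goto <latch>;		/* latch is required to be empty */
     else
       goto <exit>;

   i.e. a GIMPLE_COND with the (empty) loop latch as one of its successors.  */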
8137
8138 static bool
8139 find_doloop_use (struct ivopts_data *data)
8140 {
8141 struct loop *loop = data->current_loop;
8142
8143 for (unsigned i = 0; i < data->vgroups.length (); i++)
8144 {
8145 struct iv_group *group = data->vgroups[i];
8146 if (group->type == USE_COMPARE)
8147 {
8148 gcc_assert (group->vuses.length () == 1);
8149 struct iv_use *use = group->vuses[0];
8150 gimple *stmt = use->stmt;
8151 if (gimple_code (stmt) == GIMPLE_COND)
8152 {
8153 basic_block bb = gimple_bb (stmt);
8154 edge true_edge, false_edge;
8155 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
8156 /* This comparison controls the loop latch. For now, require that
8157 the latch be empty. */
8158 if ((loop->latch == true_edge->dest
8159 || loop->latch == false_edge->dest)
8160 && empty_block_p (loop->latch))
8161 {
8162 group->doloop_p = true;
8163 if (dump_file && (dump_flags & TDF_DETAILS))
8164 {
8165 fprintf (dump_file, "Doloop cmp iv use: ");
8166 print_gimple_stmt (dump_file, stmt, TDF_DETAILS);
8167 }
8168 return true;
8169 }
8170 }
8171 }
8172 }
8173
8174 return false;
8175 }
8176
8177 /* For targets that support doloop, predict whether the later RTL doloop
8178 transformation will be performed on this loop; if so, detect the doloop
8179 use and set the flag doloop_use_p. */
8180
8181 void
8182 analyze_and_mark_doloop_use (struct ivopts_data *data)
8183 {
8184 data->doloop_use_p = false;
8185
8186 if (!flag_branch_on_count_reg)
8187 return;
8188
8189 if (data->current_loop->unroll == USHRT_MAX)
8190 return;
8191
8192 if (!generic_predict_doloop_p (data))
8193 return;
8194
8195 if (find_doloop_use (data))
8196 {
8197 data->doloop_use_p = true;
8198 if (dump_file && (dump_flags & TDF_DETAILS))
8199 {
8200 struct loop *loop = data->current_loop;
8201 fprintf (dump_file,
8202 "Predict loop %d can perform"
8203 " doloop optimization later.\n",
8204 loop->num);
8205 flow_loop_dump (loop, dump_file, NULL, 1);
8206 }
8207 }
8208 }
8209
8210 /* Optimizes the LOOP. Returns true if anything changed. */
8211
8212 static bool
8213 tree_ssa_iv_optimize_loop (struct ivopts_data *data, class loop *loop,
8214 bitmap toremove)
8215 {
8216 bool changed = false;
8217 class iv_ca *iv_ca;
8218 edge exit = single_dom_exit (loop);
8219 basic_block *body;
8220
8221 gcc_assert (!data->niters);
8222 data->current_loop = loop;
8223 data->loop_loc = find_loop_location (loop).get_location_t ();
8224 data->speed = optimize_loop_for_speed_p (loop);
8225
8226 if (dump_file && (dump_flags & TDF_DETAILS))
8227 {
8228 fprintf (dump_file, "Processing loop %d", loop->num);
8229 if (data->loop_loc != UNKNOWN_LOCATION)
8230 fprintf (dump_file, " at %s:%d", LOCATION_FILE (data->loop_loc),
8231 LOCATION_LINE (data->loop_loc));
8232 fprintf (dump_file, "\n");
8233
8234 if (exit)
8235 {
8236 fprintf (dump_file, " single exit %d -> %d, exit condition ",
8237 exit->src->index, exit->dest->index);
8238 print_gimple_stmt (dump_file, last_stmt (exit->src), 0, TDF_SLIM);
8239 fprintf (dump_file, "\n");
8240 }
8241
8242 fprintf (dump_file, "\n");
8243 }
8244
8245 body = get_loop_body (loop);
8246 data->body_includes_call = loop_body_includes_call (body, loop->num_nodes);
8247 renumber_gimple_stmt_uids_in_blocks (body, loop->num_nodes);
8248
8249 data->loop_single_exit_p
8250 = exit != NULL && loop_only_exit_p (loop, body, exit);
8251
8252 /* For each ssa name determines whether it behaves as an induction variable
8253 in some loop. */
8254 if (!find_induction_variables (data, body))
8255 goto finish;
8256
8257 /* Finds interesting uses (item 1). */
8258 find_interesting_uses (data, body);
8259 if (data->vgroups.length () > MAX_CONSIDERED_GROUPS)
8260 goto finish;
8261
8262 /* Determine cost scaling factor for basic blocks in loop. */
8263 determine_scaling_factor (data, body);
8264
8265 /* Analyze doloop possibility and mark the doloop use if predicted. */
8266 analyze_and_mark_doloop_use (data);
8267
8268 /* Finds candidates for the induction variables (item 2). */
8269 find_iv_candidates (data);
8270
8271 /* Calculates the costs (item 3, part 1). */
8272 determine_iv_costs (data);
8273 determine_group_iv_costs (data);
8274 determine_set_costs (data);
8275
8276 /* Find the optimal set of induction variables (item 3, part 2). */
8277 iv_ca = find_optimal_iv_set (data);
8278 /* Cleanup basic block aux field. */
8279 for (unsigned i = 0; i < data->current_loop->num_nodes; i++)
8280 body[i]->aux = NULL;
8281 if (!iv_ca)
8282 goto finish;
8283 changed = true;
8284
8285 /* Create the new induction variables (item 4, part 1). */
8286 create_new_ivs (data, iv_ca);
8287 iv_ca_free (&iv_ca);
8288
8289 /* Rewrite the uses (item 4, part 2). */
8290 rewrite_groups (data);
8291
8292 /* Remove the ivs that are unused after rewriting. */
8293 remove_unused_ivs (data, toremove);
8294
8295 finish:
8296 free (body);
8297 free_loop_data (data);
8298
8299 return changed;
8300 }
8301
8302 /* Main entry point. Optimizes induction variables in loops. */
8303
8304 void
8305 tree_ssa_iv_optimize (void)
8306 {
8307 struct ivopts_data data;
8308 auto_bitmap toremove;
8309
8310 tree_ssa_iv_optimize_init (&data);
8311 mark_ssa_maybe_undefs ();
8312
8313 /* Optimize the loops starting with the innermost ones. */
8314 for (auto loop : loops_list (cfun, LI_FROM_INNERMOST))
8315 {
8316 if (!dbg_cnt (ivopts_loop))
8317 continue;
8318
8319 if (dump_file && (dump_flags & TDF_DETAILS))
8320 flow_loop_dump (loop, dump_file, NULL, 1);
8321
8322 tree_ssa_iv_optimize_loop (&data, loop, toremove);
8323 }
8324
8325 /* Remove eliminated IV defs. */
8326 release_defs_bitset (toremove);
8327
8328 /* We have changed the structure of induction variables; it might happen
8329 that definitions in the scev database refer to some of them that were
8330 eliminated. */
8331 scev_reset_htab ();
8332 /* Likewise niter and control-IV information. */
8333 free_numbers_of_iterations_estimates (cfun);
8334
8335 tree_ssa_iv_optimize_finalize (&data);
8336 }
8337
8338 #include "gt-tree-ssa-loop-ivopts.h"