1 /* Induction variable optimizations.
2 Copyright (C) 2003-2016 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This pass tries to find the optimal set of induction variables for the loop.
21 It optimizes just the basic linear induction variables (although adding
22 support for other types should not be too hard). It includes the
23 optimizations commonly known as strength reduction, induction variable
24 coalescing and induction variable elimination. It does it in the
25 following steps:
26
27 1) The interesting uses of induction variables are found. This includes
28
29 -- uses of induction variables in non-linear expressions
30 -- addresses of arrays
31 -- comparisons of induction variables
32
33 2) Candidates for the induction variables are found. This includes
34
35 -- old induction variables
36 -- the variables defined by expressions derived from the "interesting
37 uses" above
38
39   3) The optimal (with respect to a cost function) set of variables is chosen.  The
40 cost function assigns a cost to sets of induction variables and consists
41 of three parts:
42
43 -- The use costs. Each of the interesting uses chooses the best induction
44 variable in the set and adds its cost to the sum. The cost reflects
45         the time spent on modifying the induction variable's value to be usable
46 for the given purpose (adding base and offset for arrays, etc.).
47 -- The variable costs. Each of the variables has a cost assigned that
48 reflects the costs associated with incrementing the value of the
49 variable. The original variables are somewhat preferred.
50 -- The set cost. Depending on the size of the set, extra cost may be
51 added to reflect register pressure.
52
53 All the costs are defined in a machine-specific way, using the target
54 hooks and machine descriptions to determine them.
55
56 4) The trees are transformed to use the new variables, the dead code is
57 removed.
58
59    All of this is done loop by loop.  Doing it globally is theoretically
60    possible; it might give better performance and enable us to decide costs
61    more precisely, but getting all the interactions right would be
62    complicated.  */
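
/* An illustrative sketch (not taken from this pass): a loop such as

     for (i = 0; i < n; i++)
       sum += a[i];

   conceptually recomputes the address &a + i * sizeof (*a) in every
   iteration.  Choosing a pointer induction variable instead yields code
   along the lines of

     for (p = &a[0]; p != &a[n]; p++)
       sum += *p;

   where the address computation is strength-reduced to a single pointer
   increment, and the counter i can be eliminated if the exit test was its
   only remaining use.  */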
63
64 #include "config.h"
65 #include "system.h"
66 #include "coretypes.h"
67 #include "backend.h"
68 #include "rtl.h"
69 #include "tree.h"
70 #include "gimple.h"
71 #include "cfghooks.h"
72 #include "tree-pass.h"
73 #include "tm_p.h"
74 #include "ssa.h"
75 #include "expmed.h"
76 #include "insn-config.h"
77 #include "emit-rtl.h"
78 #include "recog.h"
79 #include "cgraph.h"
80 #include "gimple-pretty-print.h"
81 #include "alias.h"
82 #include "fold-const.h"
83 #include "stor-layout.h"
84 #include "tree-eh.h"
85 #include "gimplify.h"
86 #include "gimple-iterator.h"
87 #include "gimplify-me.h"
88 #include "tree-cfg.h"
89 #include "tree-ssa-loop-ivopts.h"
90 #include "tree-ssa-loop-manip.h"
91 #include "tree-ssa-loop-niter.h"
92 #include "tree-ssa-loop.h"
93 #include "explow.h"
94 #include "expr.h"
95 #include "tree-dfa.h"
96 #include "tree-ssa.h"
97 #include "cfgloop.h"
98 #include "tree-scalar-evolution.h"
99 #include "params.h"
100 #include "tree-affine.h"
101 #include "tree-ssa-propagate.h"
102 #include "tree-ssa-address.h"
103 #include "builtins.h"
104 #include "tree-vectorizer.h"
105
106 /* FIXME: Expressions are expanded to RTL in this pass to determine the
107 cost of different addressing modes. This should be moved to a TBD
108 interface between the GIMPLE and RTL worlds. */
109
110 /* The infinite cost. */
111 #define INFTY 10000000
112
113 #define AVG_LOOP_NITER(LOOP) 5
114
115 /* Returns the expected number of loop iterations for LOOP.
116 The average trip count is computed from profile data if it
117 exists. */
118
119 static inline HOST_WIDE_INT
120 avg_loop_niter (struct loop *loop)
121 {
122 HOST_WIDE_INT niter = estimated_stmt_executions_int (loop);
123 if (niter == -1)
124 return AVG_LOOP_NITER (loop);
125
126 return niter;
127 }
128
129 /* Representation of the induction variable. */
130 struct iv
131 {
132 tree base; /* Initial value of the iv. */
133   tree base_object;	/* A memory object to which the induction variable points.  */
134 tree step; /* Step of the iv (constant only). */
135 tree ssa_name; /* The ssa name with the value. */
136   unsigned use_id;	/* The id of the use recorded for this iv, if any.  */
137 bool biv_p; /* Is it a biv? */
138 bool have_use_for; /* Do we already have a use for it? */
139 bool no_overflow; /* True if the iv doesn't overflow. */
140   bool have_address_use;/* For a biv, whether it is used in any address
141 			   type use.  */
142 };
143
144 /* Per-ssa version information (induction variable descriptions, etc.). */
145 struct version_info
146 {
147 tree name; /* The ssa name. */
148 struct iv *iv; /* Induction variable description. */
149 bool has_nonlin_use; /* For a loop-level invariant, whether it is used in
150 an expression that is not an induction variable. */
151 bool preserve_biv; /* For the original biv, whether to preserve it. */
152 unsigned inv_id; /* Id of an invariant. */
153 };
154
155 /* Types of uses. */
156 enum use_type
157 {
158 USE_NONLINEAR_EXPR, /* Use in a nonlinear expression. */
159 USE_ADDRESS, /* Use in an address. */
160 USE_COMPARE /* Use is a compare. */
161 };
162
163 /* Cost of a computation. */
164 struct comp_cost
165 {
166 int cost; /* The runtime cost. */
167 unsigned complexity; /* The estimate of the complexity of the code for
168 the computation (in no concrete units --
169 complexity field should be larger for more
170 complex expressions and addressing modes). */
171 };
172
173 static const comp_cost no_cost = {0, 0};
174 static const comp_cost infinite_cost = {INFTY, INFTY};
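
/* For illustration (a hedged reading of how these fields are used later in
   this file): when two ways of expressing a use have equal runtime cost,
   the one with the smaller complexity is preferred, so an addressing mode
   such as [base + index * 8 + offset] would typically carry a larger
   complexity than a plain [base] even if the target reports the same cost
   for both.  */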
175
176 /* The candidate - cost pair. */
177 struct cost_pair
178 {
179 struct iv_cand *cand; /* The candidate. */
180 comp_cost cost; /* The cost. */
181 bitmap depends_on; /* The list of invariants that have to be
182 preserved. */
183 tree value; /* For final value elimination, the expression for
184 the final value of the iv. For iv elimination,
185 the new bound to compare with. */
186 enum tree_code comp; /* For iv elimination, the comparison. */
187 int inv_expr_id; /* Loop invariant expression id. */
188 };
189
190 /* Use. */
191 struct iv_use
192 {
193 unsigned id; /* The id of the use. */
194 unsigned sub_id; /* The id of the sub use. */
195 enum use_type type; /* Type of the use. */
196 struct iv *iv; /* The induction variable it is based on. */
197 gimple *stmt; /* Statement in that it occurs. */
198 tree *op_p; /* The place where it occurs. */
199 bitmap related_cands; /* The set of "related" iv candidates, plus the common
200 important ones. */
201
202 unsigned n_map_members; /* Number of candidates in the cost_map list. */
203 struct cost_pair *cost_map;
204 			/* The costs with respect to the iv candidates.  */
205
206 struct iv_cand *selected;
207 /* The selected candidate. */
208
209 struct iv_use *next; /* The next sub use. */
210 tree addr_base; /* Base address with const offset stripped. */
211 unsigned HOST_WIDE_INT addr_offset;
212 /* Const offset stripped from base address. */
213 };
214
215 /* The position where the iv is computed. */
216 enum iv_position
217 {
218 IP_NORMAL, /* At the end, just before the exit condition. */
219 IP_END, /* At the end of the latch block. */
220 IP_BEFORE_USE, /* Immediately before a specific use. */
221 IP_AFTER_USE, /* Immediately after a specific use. */
222 IP_ORIGINAL /* The original biv. */
223 };
224
225 /* The induction variable candidate. */
226 struct iv_cand
227 {
228 unsigned id; /* The number of the candidate. */
229 bool important; /* Whether this is an "important" candidate, i.e. such
230 that it should be considered by all uses. */
231 ENUM_BITFIELD(iv_position) pos : 8; /* Where it is computed. */
232 gimple *incremented_at;/* For original biv, the statement where it is
233 incremented. */
234 tree var_before; /* The variable used for it before increment. */
235 tree var_after; /* The variable used for it after increment. */
236 struct iv *iv; /* The value of the candidate. NULL for
237 "pseudocandidate" used to indicate the possibility
238 to replace the final value of an iv by direct
239 computation of the value. */
240 unsigned cost; /* Cost of the candidate. */
241 unsigned cost_step; /* Cost of the candidate's increment operation. */
242 struct iv_use *ainc_use; /* For IP_{BEFORE,AFTER}_USE candidates, the place
243 where it is incremented. */
244 bitmap depends_on; /* The list of invariants that are used in step of the
245 biv. */
246 struct iv *orig_iv; /* The original iv if this cand is added from biv with
247 smaller type. */
248 };
249
250 /* Hashtable entry for common candidate derived from iv uses. */
251 struct iv_common_cand
252 {
253 tree base;
254 tree step;
255 /* IV uses from which this common candidate is derived. */
256 auto_vec<iv_use *> uses;
257 hashval_t hash;
258 };
259
260 /* Hashtable helpers. */
261
262 struct iv_common_cand_hasher : delete_ptr_hash <iv_common_cand>
263 {
264 static inline hashval_t hash (const iv_common_cand *);
265 static inline bool equal (const iv_common_cand *, const iv_common_cand *);
266 };
267
268 /* Hash function for possible common candidates. */
269
270 inline hashval_t
271 iv_common_cand_hasher::hash (const iv_common_cand *ccand)
272 {
273 return ccand->hash;
274 }
275
276 /* Hash table equality function for common candidates. */
277
278 inline bool
279 iv_common_cand_hasher::equal (const iv_common_cand *ccand1,
280 const iv_common_cand *ccand2)
281 {
282 return (ccand1->hash == ccand2->hash
283 && operand_equal_p (ccand1->base, ccand2->base, 0)
284 && operand_equal_p (ccand1->step, ccand2->step, 0)
285 && (TYPE_PRECISION (TREE_TYPE (ccand1->base))
286 == TYPE_PRECISION (TREE_TYPE (ccand2->base))));
287 }
288
289 /* Loop invariant expression hashtable entry. */
290 struct iv_inv_expr_ent
291 {
292 tree expr;
293 int id;
294 hashval_t hash;
295 };
296
297 /* Hashtable helpers. */
298
299 struct iv_inv_expr_hasher : free_ptr_hash <iv_inv_expr_ent>
300 {
301 static inline hashval_t hash (const iv_inv_expr_ent *);
302 static inline bool equal (const iv_inv_expr_ent *, const iv_inv_expr_ent *);
303 };
304
305 /* Hash function for loop invariant expressions. */
306
307 inline hashval_t
308 iv_inv_expr_hasher::hash (const iv_inv_expr_ent *expr)
309 {
310 return expr->hash;
311 }
312
313 /* Hash table equality function for expressions. */
314
315 inline bool
316 iv_inv_expr_hasher::equal (const iv_inv_expr_ent *expr1,
317 const iv_inv_expr_ent *expr2)
318 {
319 return expr1->hash == expr2->hash
320 && operand_equal_p (expr1->expr, expr2->expr, 0);
321 }
322
323 struct ivopts_data
324 {
325 /* The currently optimized loop. */
326 struct loop *current_loop;
327 source_location loop_loc;
328
329 /* Numbers of iterations for all exits of the current loop. */
330 hash_map<edge, tree_niter_desc *> *niters;
331
332 /* Number of registers used in it. */
333 unsigned regs_used;
334
335 /* The size of version_info array allocated. */
336 unsigned version_info_size;
337
338 /* The array of information for the ssa names. */
339 struct version_info *version_info;
340
341 /* The hashtable of loop invariant expressions created
342 by ivopt. */
343 hash_table<iv_inv_expr_hasher> *inv_expr_tab;
344
345 /* Loop invariant expression id. */
346 int inv_expr_id;
347
348 /* The bitmap of indices in version_info whose value was changed. */
349 bitmap relevant;
350
351 /* The uses of induction variables. */
352 vec<iv_use *> iv_uses;
353
354 /* The candidates. */
355 vec<iv_cand *> iv_candidates;
356
357 /* A bitmap of important candidates. */
358 bitmap important_candidates;
359
360 /* Cache used by tree_to_aff_combination_expand. */
361 hash_map<tree, name_expansion *> *name_expansion_cache;
362
363 /* The hashtable of common candidates derived from iv uses. */
364 hash_table<iv_common_cand_hasher> *iv_common_cand_tab;
365
366 /* The common candidates. */
367 vec<iv_common_cand *> iv_common_cands;
368
369 /* The maximum invariant id. */
370 unsigned max_inv_id;
371
372 /* Number of no_overflow BIVs which are not used in memory address. */
373 unsigned bivs_not_used_in_addr;
374
375 /* Obstack for iv structure. */
376 struct obstack iv_obstack;
377
378   /* Whether to consider all candidates when replacing a use, as opposed to
379      only the related and important ones.  */
380 bool consider_all_candidates;
381
382 /* Are we optimizing for speed? */
383 bool speed;
384
385 /* Whether the loop body includes any function calls. */
386 bool body_includes_call;
387
388   /* Whether the loop body can only be exited via a single exit.  */
389 bool loop_single_exit_p;
390 };
391
392 /* An assignment of iv candidates to uses. */
393
394 struct iv_ca
395 {
396 /* The number of uses covered by the assignment. */
397 unsigned upto;
398
399 /* Number of uses that cannot be expressed by the candidates in the set. */
400 unsigned bad_uses;
401
402 /* Candidate assigned to a use, together with the related costs. */
403 struct cost_pair **cand_for_use;
404
405 /* Number of times each candidate is used. */
406 unsigned *n_cand_uses;
407
408 /* The candidates used. */
409 bitmap cands;
410
411 /* The number of candidates in the set. */
412 unsigned n_cands;
413
414 /* Total number of registers needed. */
415 unsigned n_regs;
416
417 /* Total cost of expressing uses. */
418 comp_cost cand_use_cost;
419
420 /* Total cost of candidates. */
421 unsigned cand_cost;
422
423 /* Number of times each invariant is used. */
424 unsigned *n_invariant_uses;
425
426   /* The array holding the number of uses of each loop
427      invariant expression created by ivopts.  */
428 unsigned *used_inv_expr;
429
430 /* The number of created loop invariants. */
431 unsigned num_used_inv_expr;
432
433 /* Total cost of the assignment. */
434 comp_cost cost;
435 };
436
437 /* Difference of two iv candidate assignments. */
438
439 struct iv_ca_delta
440 {
441 /* Changed use. */
442 struct iv_use *use;
443
444 /* An old assignment (for rollback purposes). */
445 struct cost_pair *old_cp;
446
447 /* A new assignment. */
448 struct cost_pair *new_cp;
449
450 /* Next change in the list. */
451 struct iv_ca_delta *next_change;
452 };
453
454 /* Bound on the number of candidates below which all candidates are considered.  */
455
456 #define CONSIDER_ALL_CANDIDATES_BOUND \
457 ((unsigned) PARAM_VALUE (PARAM_IV_CONSIDER_ALL_CANDIDATES_BOUND))
458
459 /* If there are more iv occurrences, we just give up (it is quite unlikely that
460 optimizing such a loop would help, and it would take ages). */
461
462 #define MAX_CONSIDERED_USES \
463 ((unsigned) PARAM_VALUE (PARAM_IV_MAX_CONSIDERED_USES))
464
465 /* If there are at most this number of ivs in the set, always try removing
466    unnecessary ivs from the set.  */
467
468 #define ALWAYS_PRUNE_CAND_SET_BOUND \
469 ((unsigned) PARAM_VALUE (PARAM_IV_ALWAYS_PRUNE_CAND_SET_BOUND))
470
471 /* The list of trees for which the decl_rtl field must be reset is stored
472    here.  */
473
474 static vec<tree> decl_rtl_to_reset;
475
476 static comp_cost force_expr_to_var_cost (tree, bool);
477
478 /* Number of uses recorded in DATA. */
479
480 static inline unsigned
481 n_iv_uses (struct ivopts_data *data)
482 {
483 return data->iv_uses.length ();
484 }
485
486 /* Ith use recorded in DATA. */
487
488 static inline struct iv_use *
489 iv_use (struct ivopts_data *data, unsigned i)
490 {
491 return data->iv_uses[i];
492 }
493
494 /* Number of candidates recorded in DATA. */
495
496 static inline unsigned
497 n_iv_cands (struct ivopts_data *data)
498 {
499 return data->iv_candidates.length ();
500 }
501
502 /* Ith candidate recorded in DATA. */
503
504 static inline struct iv_cand *
505 iv_cand (struct ivopts_data *data, unsigned i)
506 {
507 return data->iv_candidates[i];
508 }
509
510 /* The single loop exit if it dominates the latch, NULL otherwise. */
511
512 edge
513 single_dom_exit (struct loop *loop)
514 {
515 edge exit = single_exit (loop);
516
517 if (!exit)
518 return NULL;
519
520 if (!just_once_each_iteration_p (loop, exit->src))
521 return NULL;
522
523 return exit;
524 }
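
/* Illustration: in a loop shaped like

     loop:
       if (a) goto out;    <- exit 1
       ...
       if (b) goto out;    <- exit 2
       goto loop;

   there is no single exit, so NULL is returned.  Even with exactly one
   exit, its source block must be executed in every iteration (it must
   dominate the latch); otherwise the iteration count derived from that
   exit does not describe the whole loop and NULL is returned as well.  */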
525
526 /* Dumps information about the induction variable IV to FILE, including its ssa name (if any) when DUMP_NAME is true.  */
527
528 void
529 dump_iv (FILE *file, struct iv *iv, bool dump_name)
530 {
531 if (iv->ssa_name && dump_name)
532 {
533 fprintf (file, "ssa name ");
534 print_generic_expr (file, iv->ssa_name, TDF_SLIM);
535 fprintf (file, "\n");
536 }
537
538 fprintf (file, " type ");
539 print_generic_expr (file, TREE_TYPE (iv->base), TDF_SLIM);
540 fprintf (file, "\n");
541
542 if (iv->step)
543 {
544 fprintf (file, " base ");
545 print_generic_expr (file, iv->base, TDF_SLIM);
546 fprintf (file, "\n");
547
548 fprintf (file, " step ");
549 print_generic_expr (file, iv->step, TDF_SLIM);
550 fprintf (file, "\n");
551 }
552 else
553 {
554 fprintf (file, " invariant ");
555 print_generic_expr (file, iv->base, TDF_SLIM);
556 fprintf (file, "\n");
557 }
558
559 if (iv->base_object)
560 {
561 fprintf (file, " base object ");
562 print_generic_expr (file, iv->base_object, TDF_SLIM);
563 fprintf (file, "\n");
564 }
565
566 if (iv->biv_p)
567 fprintf (file, " is a biv\n");
568
569 if (iv->no_overflow)
570 fprintf (file, " iv doesn't overflow wrto loop niter\n");
571 }
572
573 /* Dumps information about the USE to FILE. */
574
575 void
576 dump_use (FILE *file, struct iv_use *use)
577 {
578 fprintf (file, "use %d", use->id);
579 if (use->sub_id)
580 fprintf (file, ".%d", use->sub_id);
581
582 fprintf (file, "\n");
583
584 switch (use->type)
585 {
586 case USE_NONLINEAR_EXPR:
587 fprintf (file, " generic\n");
588 break;
589
590 case USE_ADDRESS:
591 fprintf (file, " address\n");
592 break;
593
594 case USE_COMPARE:
595 fprintf (file, " compare\n");
596 break;
597
598 default:
599 gcc_unreachable ();
600 }
601
602 fprintf (file, " in statement ");
603 print_gimple_stmt (file, use->stmt, 0, 0);
604 fprintf (file, "\n");
605
606 fprintf (file, " at position ");
607 if (use->op_p)
608 print_generic_expr (file, *use->op_p, TDF_SLIM);
609 fprintf (file, "\n");
610
611 dump_iv (file, use->iv, false);
612
613 if (use->related_cands)
614 {
615 fprintf (file, " related candidates ");
616 dump_bitmap (file, use->related_cands);
617 }
618 }
619
620 /* Dumps information about the uses to FILE. */
621
622 void
623 dump_uses (FILE *file, struct ivopts_data *data)
624 {
625 unsigned i;
626 struct iv_use *use;
627
628 for (i = 0; i < n_iv_uses (data); i++)
629 {
630 use = iv_use (data, i);
631 do
632 {
633 dump_use (file, use);
634 use = use->next;
635 }
636 while (use);
637 fprintf (file, "\n");
638 }
639 }
640
641 /* Dumps information about induction variable candidate CAND to FILE. */
642
643 void
644 dump_cand (FILE *file, struct iv_cand *cand)
645 {
646 struct iv *iv = cand->iv;
647
648 fprintf (file, "candidate %d%s\n",
649 cand->id, cand->important ? " (important)" : "");
650
651 if (cand->depends_on)
652 {
653 fprintf (file, " depends on ");
654 dump_bitmap (file, cand->depends_on);
655 }
656
657 if (!iv)
658 {
659 fprintf (file, " final value replacement\n");
660 return;
661 }
662
663 if (cand->var_before)
664 {
665 fprintf (file, " var_before ");
666 print_generic_expr (file, cand->var_before, TDF_SLIM);
667 fprintf (file, "\n");
668 }
669 if (cand->var_after)
670 {
671 fprintf (file, " var_after ");
672 print_generic_expr (file, cand->var_after, TDF_SLIM);
673 fprintf (file, "\n");
674 }
675
676 switch (cand->pos)
677 {
678 case IP_NORMAL:
679 fprintf (file, " incremented before exit test\n");
680 break;
681
682 case IP_BEFORE_USE:
683 fprintf (file, " incremented before use %d\n", cand->ainc_use->id);
684 break;
685
686 case IP_AFTER_USE:
687 fprintf (file, " incremented after use %d\n", cand->ainc_use->id);
688 break;
689
690 case IP_END:
691 fprintf (file, " incremented at end\n");
692 break;
693
694 case IP_ORIGINAL:
695 fprintf (file, " original biv\n");
696 break;
697 }
698
699 dump_iv (file, iv, false);
700 }
701
702 /* Returns the info for ssa version VER. */
703
704 static inline struct version_info *
705 ver_info (struct ivopts_data *data, unsigned ver)
706 {
707 return data->version_info + ver;
708 }
709
710 /* Returns the info for ssa name NAME. */
711
712 static inline struct version_info *
713 name_info (struct ivopts_data *data, tree name)
714 {
715 return ver_info (data, SSA_NAME_VERSION (name));
716 }
717
718 /* Returns true if STMT is after the place where the IP_NORMAL ivs will be
719 emitted in LOOP. */
720
721 static bool
722 stmt_after_ip_normal_pos (struct loop *loop, gimple *stmt)
723 {
724 basic_block bb = ip_normal_pos (loop), sbb = gimple_bb (stmt);
725
726 gcc_assert (bb);
727
728 if (sbb == loop->latch)
729 return true;
730
731 if (sbb != bb)
732 return false;
733
734 return stmt == last_stmt (bb);
735 }
736
737 /* Returns true if STMT is after the place where the original induction
738 variable CAND is incremented. If TRUE_IF_EQUAL is set, we return true
739 if the positions are identical. */
740
741 static bool
742 stmt_after_inc_pos (struct iv_cand *cand, gimple *stmt, bool true_if_equal)
743 {
744 basic_block cand_bb = gimple_bb (cand->incremented_at);
745 basic_block stmt_bb = gimple_bb (stmt);
746
747 if (!dominated_by_p (CDI_DOMINATORS, stmt_bb, cand_bb))
748 return false;
749
750 if (stmt_bb != cand_bb)
751 return true;
752
753 if (true_if_equal
754 && gimple_uid (stmt) == gimple_uid (cand->incremented_at))
755 return true;
756 return gimple_uid (stmt) > gimple_uid (cand->incremented_at);
757 }
758
759 /* Returns true if STMT is after the place where the induction variable
760 CAND is incremented in LOOP. */
761
762 static bool
763 stmt_after_increment (struct loop *loop, struct iv_cand *cand, gimple *stmt)
764 {
765 switch (cand->pos)
766 {
767 case IP_END:
768 return false;
769
770 case IP_NORMAL:
771 return stmt_after_ip_normal_pos (loop, stmt);
772
773 case IP_ORIGINAL:
774 case IP_AFTER_USE:
775 return stmt_after_inc_pos (cand, stmt, false);
776
777 case IP_BEFORE_USE:
778 return stmt_after_inc_pos (cand, stmt, true);
779
780 default:
781 gcc_unreachable ();
782 }
783 }
784
785 /* Returns true if EXP is a ssa name that occurs in an abnormal phi node. */
786
787 static bool
788 abnormal_ssa_name_p (tree exp)
789 {
790 if (!exp)
791 return false;
792
793 if (TREE_CODE (exp) != SSA_NAME)
794 return false;
795
796 return SSA_NAME_OCCURS_IN_ABNORMAL_PHI (exp) != 0;
797 }
798
799 /* Returns false if BASE or INDEX contains a ssa name that occurs in an
800 abnormal phi node. Callback for for_each_index. */
801
802 static bool
803 idx_contains_abnormal_ssa_name_p (tree base, tree *index,
804 void *data ATTRIBUTE_UNUSED)
805 {
806 if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
807 {
808 if (abnormal_ssa_name_p (TREE_OPERAND (base, 2)))
809 return false;
810 if (abnormal_ssa_name_p (TREE_OPERAND (base, 3)))
811 return false;
812 }
813
814 return !abnormal_ssa_name_p (*index);
815 }
816
817 /* Returns true if EXPR contains a ssa name that occurs in an
818 abnormal phi node. */
819
820 bool
821 contains_abnormal_ssa_name_p (tree expr)
822 {
823 enum tree_code code;
824 enum tree_code_class codeclass;
825
826 if (!expr)
827 return false;
828
829 code = TREE_CODE (expr);
830 codeclass = TREE_CODE_CLASS (code);
831
832 if (code == SSA_NAME)
833 return SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr) != 0;
834
835 if (code == INTEGER_CST
836 || is_gimple_min_invariant (expr))
837 return false;
838
839 if (code == ADDR_EXPR)
840 return !for_each_index (&TREE_OPERAND (expr, 0),
841 idx_contains_abnormal_ssa_name_p,
842 NULL);
843
844 if (code == COND_EXPR)
845 return contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 0))
846 || contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 1))
847 || contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 2));
848
849 switch (codeclass)
850 {
851 case tcc_binary:
852 case tcc_comparison:
853 if (contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 1)))
854 return true;
855
856 /* Fallthru. */
857 case tcc_unary:
858 if (contains_abnormal_ssa_name_p (TREE_OPERAND (expr, 0)))
859 return true;
860
861 break;
862
863 default:
864 gcc_unreachable ();
865 }
866
867 return false;
868 }
869
870 /* Returns the structure describing number of iterations determined from
871 EXIT of DATA->current_loop, or NULL if something goes wrong. */
872
873 static struct tree_niter_desc *
874 niter_for_exit (struct ivopts_data *data, edge exit)
875 {
876 struct tree_niter_desc *desc;
877 tree_niter_desc **slot;
878
879 if (!data->niters)
880 {
881 data->niters = new hash_map<edge, tree_niter_desc *>;
882 slot = NULL;
883 }
884 else
885 slot = data->niters->get (exit);
886
887 if (!slot)
888 {
889 /* Try to determine number of iterations. We cannot safely work with ssa
890 names that appear in phi nodes on abnormal edges, so that we do not
891 create overlapping life ranges for them (PR 27283). */
892 desc = XNEW (struct tree_niter_desc);
893 if (!number_of_iterations_exit (data->current_loop,
894 exit, desc, true)
895 || contains_abnormal_ssa_name_p (desc->niter))
896 {
897 XDELETE (desc);
898 desc = NULL;
899 }
900 data->niters->put (exit, desc);
901 }
902 else
903 desc = *slot;
904
905 return desc;
906 }
907
908 /* Returns the structure describing number of iterations determined from
909 single dominating exit of DATA->current_loop, or NULL if something
910 goes wrong. */
911
912 static struct tree_niter_desc *
913 niter_for_single_dom_exit (struct ivopts_data *data)
914 {
915 edge exit = single_dom_exit (data->current_loop);
916
917 if (!exit)
918 return NULL;
919
920 return niter_for_exit (data, exit);
921 }
922
923 /* Initializes data structures used by the iv optimization pass, stored
924 in DATA. */
925
926 static void
927 tree_ssa_iv_optimize_init (struct ivopts_data *data)
928 {
929 data->version_info_size = 2 * num_ssa_names;
930 data->version_info = XCNEWVEC (struct version_info, data->version_info_size);
931 data->relevant = BITMAP_ALLOC (NULL);
932 data->important_candidates = BITMAP_ALLOC (NULL);
933 data->max_inv_id = 0;
934 data->niters = NULL;
935 data->iv_uses.create (20);
936 data->iv_candidates.create (20);
937 data->inv_expr_tab = new hash_table<iv_inv_expr_hasher> (10);
938 data->inv_expr_id = 0;
939 data->name_expansion_cache = NULL;
940 data->iv_common_cand_tab = new hash_table<iv_common_cand_hasher> (10);
941 data->iv_common_cands.create (20);
942 decl_rtl_to_reset.create (20);
943 gcc_obstack_init (&data->iv_obstack);
944 }
945
946 /* Returns a memory object to which EXPR points.  In case we are able to
947 determine that it does not point to any such object, NULL is returned. */
948
949 static tree
950 determine_base_object (tree expr)
951 {
952 enum tree_code code = TREE_CODE (expr);
953 tree base, obj;
954
955   /* If this is a pointer cast to any type, we need to determine
956 the base object for the pointer; so handle conversions before
957 throwing away non-pointer expressions. */
958 if (CONVERT_EXPR_P (expr))
959 return determine_base_object (TREE_OPERAND (expr, 0));
960
961 if (!POINTER_TYPE_P (TREE_TYPE (expr)))
962 return NULL_TREE;
963
964 switch (code)
965 {
966 case INTEGER_CST:
967 return NULL_TREE;
968
969 case ADDR_EXPR:
970 obj = TREE_OPERAND (expr, 0);
971 base = get_base_address (obj);
972
973 if (!base)
974 return expr;
975
976 if (TREE_CODE (base) == MEM_REF)
977 return determine_base_object (TREE_OPERAND (base, 0));
978
979 return fold_convert (ptr_type_node,
980 build_fold_addr_expr (base));
981
982 case POINTER_PLUS_EXPR:
983 return determine_base_object (TREE_OPERAND (expr, 0));
984
985 case PLUS_EXPR:
986 case MINUS_EXPR:
987 /* Pointer addition is done solely using POINTER_PLUS_EXPR. */
988 gcc_unreachable ();
989
990 default:
991 return fold_convert (ptr_type_node, expr);
992 }
993 }
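
/* A few illustrative cases (a sketch only; 'a' is an array declaration and
   p_1 a pointer SSA name):

     determine_base_object (&a[i_2])     => &a, converted to ptr_type_node
     determine_base_object (p_1 + 16)    => p_1, converted to ptr_type_node
     determine_base_object ((void *) 0)  => NULL_TREE (points to no object)  */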
994
995 /* Return true if address expression with non-DECL_P operand appears
996 in EXPR. */
997
998 static bool
999 contain_complex_addr_expr (tree expr)
1000 {
1001 bool res = false;
1002
1003 STRIP_NOPS (expr);
1004 switch (TREE_CODE (expr))
1005 {
1006 case POINTER_PLUS_EXPR:
1007 case PLUS_EXPR:
1008 case MINUS_EXPR:
1009 res |= contain_complex_addr_expr (TREE_OPERAND (expr, 0));
1010 res |= contain_complex_addr_expr (TREE_OPERAND (expr, 1));
1011 break;
1012
1013 case ADDR_EXPR:
1014 return (!DECL_P (TREE_OPERAND (expr, 0)));
1015
1016 default:
1017 return false;
1018 }
1019
1020 return res;
1021 }
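
/* For example (illustrative): the function returns true for &a[0], whose
   ADDR_EXPR operand is an ARRAY_REF rather than a declaration, but false
   both for plain &a (the operand is the DECL itself) and for p_1 + 16,
   which contains no ADDR_EXPR at all.  */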
1022
1023 /* Allocates an induction variable with given initial value BASE and step STEP
1024 for loop LOOP. NO_OVERFLOW implies the iv doesn't overflow. */
1025
1026 static struct iv *
1027 alloc_iv (struct ivopts_data *data, tree base, tree step,
1028 bool no_overflow = false)
1029 {
1030 tree expr = base;
1031 struct iv *iv = (struct iv*) obstack_alloc (&data->iv_obstack,
1032 sizeof (struct iv));
1033 gcc_assert (step != NULL_TREE);
1034
1035 /* Lower address expression in base except ones with DECL_P as operand.
1036 By doing this:
1037 1) More accurate cost can be computed for address expressions;
1038 2) Duplicate candidates won't be created for bases in different
1039 forms, like &a[0] and &a. */
1040 STRIP_NOPS (expr);
1041 if ((TREE_CODE (expr) == ADDR_EXPR && !DECL_P (TREE_OPERAND (expr, 0)))
1042 || contain_complex_addr_expr (expr))
1043 {
1044 aff_tree comb;
1045 tree_to_aff_combination (expr, TREE_TYPE (base), &comb);
1046 base = fold_convert (TREE_TYPE (base), aff_combination_to_tree (&comb));
1047 }
1048
1049 iv->base = base;
1050 iv->base_object = determine_base_object (base);
1051 iv->step = step;
1052 iv->biv_p = false;
1053 iv->have_use_for = false;
1054 iv->use_id = 0;
1055 iv->ssa_name = NULL_TREE;
1056 iv->no_overflow = no_overflow;
1057 iv->have_address_use = false;
1058
1059 return iv;
1060 }
1061
1062 /* Sets STEP and BASE for induction variable IV. NO_OVERFLOW implies the IV
1063 doesn't overflow. */
1064
1065 static void
1066 set_iv (struct ivopts_data *data, tree iv, tree base, tree step,
1067 bool no_overflow)
1068 {
1069 struct version_info *info = name_info (data, iv);
1070
1071 gcc_assert (!info->iv);
1072
1073 bitmap_set_bit (data->relevant, SSA_NAME_VERSION (iv));
1074 info->iv = alloc_iv (data, base, step, no_overflow);
1075 info->iv->ssa_name = iv;
1076 }
1077
1078 /* Finds induction variable declaration for VAR. */
1079
1080 static struct iv *
1081 get_iv (struct ivopts_data *data, tree var)
1082 {
1083 basic_block bb;
1084 tree type = TREE_TYPE (var);
1085
1086 if (!POINTER_TYPE_P (type)
1087 && !INTEGRAL_TYPE_P (type))
1088 return NULL;
1089
1090 if (!name_info (data, var)->iv)
1091 {
1092 bb = gimple_bb (SSA_NAME_DEF_STMT (var));
1093
1094 if (!bb
1095 || !flow_bb_inside_loop_p (data->current_loop, bb))
1096 set_iv (data, var, var, build_int_cst (type, 0), true);
1097 }
1098
1099 return name_info (data, var)->iv;
1100 }
1101
1102 /* Return the first non-invariant ssa var found in EXPR. */
1103
1104 static tree
1105 extract_single_var_from_expr (tree expr)
1106 {
1107 int i, n;
1108 tree tmp;
1109 enum tree_code code;
1110
1111 if (!expr || is_gimple_min_invariant (expr))
1112 return NULL;
1113
1114 code = TREE_CODE (expr);
1115 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
1116 {
1117 n = TREE_OPERAND_LENGTH (expr);
1118 for (i = 0; i < n; i++)
1119 {
1120 tmp = extract_single_var_from_expr (TREE_OPERAND (expr, i));
1121
1122 if (tmp)
1123 return tmp;
1124 }
1125 }
1126 return (TREE_CODE (expr) == SSA_NAME) ? expr : NULL;
1127 }
1128
1129 /* Finds basic ivs. */
1130
1131 static bool
1132 find_bivs (struct ivopts_data *data)
1133 {
1134 gphi *phi;
1135 affine_iv iv;
1136 tree step, type, base, stop;
1137 bool found = false;
1138 struct loop *loop = data->current_loop;
1139 gphi_iterator psi;
1140
1141 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
1142 {
1143 phi = psi.phi ();
1144
1145 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (phi)))
1146 continue;
1147
1148 if (virtual_operand_p (PHI_RESULT (phi)))
1149 continue;
1150
1151 if (!simple_iv (loop, loop, PHI_RESULT (phi), &iv, true))
1152 continue;
1153
1154 if (integer_zerop (iv.step))
1155 continue;
1156
1157 step = iv.step;
1158 base = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
1159       /* Stop expanding the iv base at the first ssa var referred to by the iv
1160 	 step.  Ideally we would stop at any such ssa var, but since that is
1161 	 expensive and these cases rarely happen, we only handle the first one.
1162
1163 See PR64705 for the rationale. */
1164 stop = extract_single_var_from_expr (step);
1165 base = expand_simple_operations (base, stop);
1166 if (contains_abnormal_ssa_name_p (base)
1167 || contains_abnormal_ssa_name_p (step))
1168 continue;
1169
1170 type = TREE_TYPE (PHI_RESULT (phi));
1171 base = fold_convert (type, base);
1172 if (step)
1173 {
1174 if (POINTER_TYPE_P (type))
1175 step = convert_to_ptrofftype (step);
1176 else
1177 step = fold_convert (type, step);
1178 }
1179
1180 set_iv (data, PHI_RESULT (phi), base, step, iv.no_overflow);
1181 found = true;
1182 }
1183
1184 return found;
1185 }
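
/* As a sketch, the canonical loop counter appears in the loop header as

     # i_1 = PHI <0(preheader), i_7(latch)>
     ...
     i_7 = i_1 + 1;

   for which simple_iv reports base 0 and step 1, so i_1 is recorded as an
   iv with those parameters; mark_bivs below then flags i_1 and i_7 as
   basic ivs.  */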
1186
1187 /* Marks basic ivs. */
1188
1189 static void
1190 mark_bivs (struct ivopts_data *data)
1191 {
1192 gphi *phi;
1193 gimple *def;
1194 tree var;
1195 struct iv *iv, *incr_iv;
1196 struct loop *loop = data->current_loop;
1197 basic_block incr_bb;
1198 gphi_iterator psi;
1199
1200 data->bivs_not_used_in_addr = 0;
1201 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
1202 {
1203 phi = psi.phi ();
1204
1205 iv = get_iv (data, PHI_RESULT (phi));
1206 if (!iv)
1207 continue;
1208
1209 var = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
1210 def = SSA_NAME_DEF_STMT (var);
1211       /* Don't mark an iv peeled from another one as a biv.  */
1212 if (def
1213 && gimple_code (def) == GIMPLE_PHI
1214 && gimple_bb (def) == loop->header)
1215 continue;
1216
1217 incr_iv = get_iv (data, var);
1218 if (!incr_iv)
1219 continue;
1220
1221       /* If the increment is in a subloop, ignore it.  */
1222 incr_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
1223 if (incr_bb->loop_father != data->current_loop
1224 || (incr_bb->flags & BB_IRREDUCIBLE_LOOP))
1225 continue;
1226
1227 iv->biv_p = true;
1228 incr_iv->biv_p = true;
1229 if (iv->no_overflow)
1230 data->bivs_not_used_in_addr++;
1231 if (incr_iv->no_overflow)
1232 data->bivs_not_used_in_addr++;
1233 }
1234 }
1235
1236 /* Checks whether STMT defines a linear induction variable and stores its
1237 parameters to IV. */
1238
1239 static bool
1240 find_givs_in_stmt_scev (struct ivopts_data *data, gimple *stmt, affine_iv *iv)
1241 {
1242 tree lhs, stop;
1243 struct loop *loop = data->current_loop;
1244
1245 iv->base = NULL_TREE;
1246 iv->step = NULL_TREE;
1247
1248 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1249 return false;
1250
1251 lhs = gimple_assign_lhs (stmt);
1252 if (TREE_CODE (lhs) != SSA_NAME)
1253 return false;
1254
1255 if (!simple_iv (loop, loop_containing_stmt (stmt), lhs, iv, true))
1256 return false;
1257
1258   /* Stop expanding the iv base at the first ssa var referred to by the iv
1259      step.  Ideally we would stop at any such ssa var, but since that is
1260      expensive and these cases rarely happen, we only handle the first one.
1261
1262 See PR64705 for the rationale. */
1263 stop = extract_single_var_from_expr (iv->step);
1264 iv->base = expand_simple_operations (iv->base, stop);
1265 if (contains_abnormal_ssa_name_p (iv->base)
1266 || contains_abnormal_ssa_name_p (iv->step))
1267 return false;
1268
1269 /* If STMT could throw, then do not consider STMT as defining a GIV.
1270      While this will suppress optimizations, we cannot safely delete this
1271 GIV and associated statements, even if it appears it is not used. */
1272 if (stmt_could_throw_p (stmt))
1273 return false;
1274
1275 return true;
1276 }
1277
1278 /* Finds general ivs in statement STMT. */
1279
1280 static void
1281 find_givs_in_stmt (struct ivopts_data *data, gimple *stmt)
1282 {
1283 affine_iv iv;
1284
1285 if (!find_givs_in_stmt_scev (data, stmt, &iv))
1286 return;
1287
1288 set_iv (data, gimple_assign_lhs (stmt), iv.base, iv.step, iv.no_overflow);
1289 }
1290
1291 /* Finds general ivs in basic block BB. */
1292
1293 static void
1294 find_givs_in_bb (struct ivopts_data *data, basic_block bb)
1295 {
1296 gimple_stmt_iterator bsi;
1297
1298 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1299 find_givs_in_stmt (data, gsi_stmt (bsi));
1300 }
1301
1302 /* Finds general ivs. */
1303
1304 static void
1305 find_givs (struct ivopts_data *data)
1306 {
1307 struct loop *loop = data->current_loop;
1308 basic_block *body = get_loop_body_in_dom_order (loop);
1309 unsigned i;
1310
1311 for (i = 0; i < loop->num_nodes; i++)
1312 find_givs_in_bb (data, body[i]);
1313 free (body);
1314 }
1315
1316 /* For each ssa name defined in LOOP determines whether it is an induction
1317 variable and if so, its initial value and step. */
1318
1319 static bool
1320 find_induction_variables (struct ivopts_data *data)
1321 {
1322 unsigned i;
1323 bitmap_iterator bi;
1324
1325 if (!find_bivs (data))
1326 return false;
1327
1328 find_givs (data);
1329 mark_bivs (data);
1330
1331 if (dump_file && (dump_flags & TDF_DETAILS))
1332 {
1333 struct tree_niter_desc *niter = niter_for_single_dom_exit (data);
1334
1335 if (niter)
1336 {
1337 fprintf (dump_file, " number of iterations ");
1338 print_generic_expr (dump_file, niter->niter, TDF_SLIM);
1339 if (!integer_zerop (niter->may_be_zero))
1340 {
1341 fprintf (dump_file, "; zero if ");
1342 print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
1343 }
1344 fprintf (dump_file, "\n\n");
1345 	}
1346
1347 fprintf (dump_file, "Induction variables:\n\n");
1348
1349 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
1350 {
1351 if (ver_info (data, i)->iv)
1352 dump_iv (dump_file, ver_info (data, i)->iv, true);
1353 }
1354 }
1355
1356 return true;
1357 }
1358
1359 /* Records a use of type USE_TYPE at *USE_P in STMT whose value is IV.
1360 For address type use, ADDR_BASE is the stripped IV base, ADDR_OFFSET
1361 is the const offset stripped from IV base. For uses of other types,
1362 ADDR_BASE and ADDR_OFFSET are zero by default. */
1363
1364 static struct iv_use *
1365 record_use (struct ivopts_data *data, tree *use_p, struct iv *iv,
1366 gimple *stmt, enum use_type use_type, tree addr_base = NULL,
1367 unsigned HOST_WIDE_INT addr_offset = 0)
1368 {
1369 struct iv_use *use = XCNEW (struct iv_use);
1370
1371 use->id = n_iv_uses (data);
1372 use->sub_id = 0;
1373 use->type = use_type;
1374 use->iv = iv;
1375 use->stmt = stmt;
1376 use->op_p = use_p;
1377 use->related_cands = BITMAP_ALLOC (NULL);
1378 use->next = NULL;
1379 use->addr_base = addr_base;
1380 use->addr_offset = addr_offset;
1381
1382 data->iv_uses.safe_push (use);
1383
1384 return use;
1385 }
1386
1387 /* Records a sub use of type USE_TYPE at *USE_P in STMT whose value is IV.
1388 The sub use is recorded under the one whose use id is ID_GROUP. */
1389
1390 static struct iv_use *
1391 record_sub_use (struct ivopts_data *data, tree *use_p,
1392 struct iv *iv, gimple *stmt, enum use_type use_type,
1393 tree addr_base, unsigned HOST_WIDE_INT addr_offset,
1394 unsigned int id_group)
1395 {
1396 struct iv_use *use = XCNEW (struct iv_use);
1397 struct iv_use *group = iv_use (data, id_group);
1398
1399 use->id = group->id;
1400 use->sub_id = 0;
1401 use->type = use_type;
1402 use->iv = iv;
1403 use->stmt = stmt;
1404 use->op_p = use_p;
1405 use->related_cands = NULL;
1406 use->addr_base = addr_base;
1407 use->addr_offset = addr_offset;
1408
1409 /* Sub use list is maintained in offset ascending order. */
1410 if (addr_offset <= group->addr_offset)
1411 {
1412 use->related_cands = group->related_cands;
1413 group->related_cands = NULL;
1414 use->next = group;
1415 data->iv_uses[id_group] = use;
1416 }
1417 else
1418 {
1419 struct iv_use *pre;
1420 do
1421 {
1422 pre = group;
1423 group = group->next;
1424 }
1425 while (group && addr_offset > group->addr_offset);
1426 use->next = pre->next;
1427 pre->next = use;
1428 }
1429
1430 return use;
1431 }
1432
1433 /* Checks whether OP is a loop-level invariant and if so, records it.
1434 NONLINEAR_USE is true if the invariant is used in a way we do not
1435 handle specially. */
1436
1437 static void
1438 record_invariant (struct ivopts_data *data, tree op, bool nonlinear_use)
1439 {
1440 basic_block bb;
1441 struct version_info *info;
1442
1443 if (TREE_CODE (op) != SSA_NAME
1444 || virtual_operand_p (op))
1445 return;
1446
1447 bb = gimple_bb (SSA_NAME_DEF_STMT (op));
1448 if (bb
1449 && flow_bb_inside_loop_p (data->current_loop, bb))
1450 return;
1451
1452 info = name_info (data, op);
1453 info->name = op;
1454 info->has_nonlin_use |= nonlinear_use;
1455 if (!info->inv_id)
1456 info->inv_id = ++data->max_inv_id;
1457 bitmap_set_bit (data->relevant, SSA_NAME_VERSION (op));
1458 }
1459
1460 /* Checks whether the use OP is interesting and if so, records it. */
1461
1462 static struct iv_use *
1463 find_interesting_uses_op (struct ivopts_data *data, tree op)
1464 {
1465 struct iv *iv;
1466 gimple *stmt;
1467 struct iv_use *use;
1468
1469 if (TREE_CODE (op) != SSA_NAME)
1470 return NULL;
1471
1472 iv = get_iv (data, op);
1473 if (!iv)
1474 return NULL;
1475
1476 if (iv->have_use_for)
1477 {
1478 use = iv_use (data, iv->use_id);
1479
1480 gcc_assert (use->type == USE_NONLINEAR_EXPR);
1481 return use;
1482 }
1483
1484 if (integer_zerop (iv->step))
1485 {
1486 record_invariant (data, op, true);
1487 return NULL;
1488 }
1489 iv->have_use_for = true;
1490
1491 stmt = SSA_NAME_DEF_STMT (op);
1492 gcc_assert (gimple_code (stmt) == GIMPLE_PHI
1493 || is_gimple_assign (stmt));
1494
1495 use = record_use (data, NULL, iv, stmt, USE_NONLINEAR_EXPR);
1496 iv->use_id = use->id;
1497
1498 return use;
1499 }
1500
1501 /* Given a condition in statement STMT, checks whether it is a compare
1502 of an induction variable and an invariant. If this is the case,
1503 CONTROL_VAR is set to location of the iv, BOUND to the location of
1504 the invariant, IV_VAR and IV_BOUND are set to the corresponding
1505 induction variable descriptions, and true is returned. If this is not
1506 the case, CONTROL_VAR and BOUND are set to the arguments of the
1507 condition and false is returned. */
1508
1509 static bool
1510 extract_cond_operands (struct ivopts_data *data, gimple *stmt,
1511 tree **control_var, tree **bound,
1512 struct iv **iv_var, struct iv **iv_bound)
1513 {
1514 /* The objects returned when COND has constant operands. */
1515 static struct iv const_iv;
1516 static tree zero;
1517 tree *op0 = &zero, *op1 = &zero;
1518 struct iv *iv0 = &const_iv, *iv1 = &const_iv;
1519 bool ret = false;
1520
1521 if (gimple_code (stmt) == GIMPLE_COND)
1522 {
1523 gcond *cond_stmt = as_a <gcond *> (stmt);
1524 op0 = gimple_cond_lhs_ptr (cond_stmt);
1525 op1 = gimple_cond_rhs_ptr (cond_stmt);
1526 }
1527 else
1528 {
1529 op0 = gimple_assign_rhs1_ptr (stmt);
1530 op1 = gimple_assign_rhs2_ptr (stmt);
1531 }
1532
1533 zero = integer_zero_node;
1534 const_iv.step = integer_zero_node;
1535
1536 if (TREE_CODE (*op0) == SSA_NAME)
1537 iv0 = get_iv (data, *op0);
1538 if (TREE_CODE (*op1) == SSA_NAME)
1539 iv1 = get_iv (data, *op1);
1540
1541 /* Exactly one of the compared values must be an iv, and the other one must
1542 be an invariant. */
1543 if (!iv0 || !iv1)
1544 goto end;
1545
1546 if (integer_zerop (iv0->step))
1547 {
1548 /* Control variable may be on the other side. */
1549 std::swap (op0, op1);
1550 std::swap (iv0, iv1);
1551 }
1552 ret = !integer_zerop (iv0->step) && integer_zerop (iv1->step);
1553
1554 end:
1555 if (control_var)
1556 *control_var = op0;
1557 if (iv_var)
1558 *iv_var = iv0;
1559 if (bound)
1560 *bound = op1;
1561 if (iv_bound)
1562 *iv_bound = iv1;
1563
1564 return ret;
1565 }
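
/* For instance (illustrative only), for the exit condition

     if (i_1 < n_5)

   where i_1 is an iv with nonzero step and n_5 is loop invariant, true is
   returned with *CONTROL_VAR pointing at the i_1 operand and *BOUND at the
   n_5 operand.  If neither operand (or both) were such an iv, false would
   be returned and find_interesting_uses_cond below records both operands
   as ordinary nonlinear uses instead.  */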
1566
1567 /* Checks whether the condition in STMT is interesting and if so,
1568 records it. */
1569
1570 static void
1571 find_interesting_uses_cond (struct ivopts_data *data, gimple *stmt)
1572 {
1573 tree *var_p, *bound_p;
1574 struct iv *var_iv;
1575
1576 if (!extract_cond_operands (data, stmt, &var_p, &bound_p, &var_iv, NULL))
1577 {
1578 find_interesting_uses_op (data, *var_p);
1579 find_interesting_uses_op (data, *bound_p);
1580 return;
1581 }
1582
1583 record_use (data, NULL, var_iv, stmt, USE_COMPARE);
1584 }
1585
1586 /* Returns the outermost loop, relative to the loop LOOP, in which EXPR is
1587    obviously invariant, i.e. such that all its operands are defined
1588    outside of the returned loop.  Returns NULL if EXPR is not
1589    even obviously invariant in LOOP.  */
1590
1591 struct loop *
1592 outermost_invariant_loop_for_expr (struct loop *loop, tree expr)
1593 {
1594 basic_block def_bb;
1595 unsigned i, len;
1596
1597 if (is_gimple_min_invariant (expr))
1598 return current_loops->tree_root;
1599
1600 if (TREE_CODE (expr) == SSA_NAME)
1601 {
1602 def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr));
1603 if (def_bb)
1604 {
1605 if (flow_bb_inside_loop_p (loop, def_bb))
1606 return NULL;
1607 return superloop_at_depth (loop,
1608 loop_depth (def_bb->loop_father) + 1);
1609 }
1610
1611 return current_loops->tree_root;
1612 }
1613
1614 if (!EXPR_P (expr))
1615 return NULL;
1616
1617 unsigned maxdepth = 0;
1618 len = TREE_OPERAND_LENGTH (expr);
1619 for (i = 0; i < len; i++)
1620 {
1621 struct loop *ivloop;
1622 if (!TREE_OPERAND (expr, i))
1623 continue;
1624
1625 ivloop = outermost_invariant_loop_for_expr (loop, TREE_OPERAND (expr, i));
1626 if (!ivloop)
1627 return NULL;
1628 maxdepth = MAX (maxdepth, loop_depth (ivloop));
1629 }
1630
1631 return superloop_at_depth (loop, maxdepth);
1632 }
1633
1634 /* Returns true if expression EXPR is obviously invariant in LOOP,
1635 i.e. if all its operands are defined outside of the LOOP. LOOP
1636 should not be the function body. */
1637
1638 bool
1639 expr_invariant_in_loop_p (struct loop *loop, tree expr)
1640 {
1641 basic_block def_bb;
1642 unsigned i, len;
1643
1644 gcc_assert (loop_depth (loop) > 0);
1645
1646 if (is_gimple_min_invariant (expr))
1647 return true;
1648
1649 if (TREE_CODE (expr) == SSA_NAME)
1650 {
1651 def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr));
1652 if (def_bb
1653 && flow_bb_inside_loop_p (loop, def_bb))
1654 return false;
1655
1656 return true;
1657 }
1658
1659 if (!EXPR_P (expr))
1660 return false;
1661
1662 len = TREE_OPERAND_LENGTH (expr);
1663 for (i = 0; i < len; i++)
1664 if (TREE_OPERAND (expr, i)
1665 && !expr_invariant_in_loop_p (loop, TREE_OPERAND (expr, i)))
1666 return false;
1667
1668 return true;
1669 }
1670
1671 /* Given expression EXPR which computes inductive values with respect
1672    to the loop recorded in DATA, this function returns the biv from which
1673    EXPR is derived, by tracing the definition chains of ssa variables in EXPR.  */
1674
1675 static struct iv*
1676 find_deriving_biv_for_expr (struct ivopts_data *data, tree expr)
1677 {
1678 struct iv *iv;
1679 unsigned i, n;
1680 tree e2, e1;
1681 enum tree_code code;
1682 gimple *stmt;
1683
1684 if (expr == NULL_TREE)
1685 return NULL;
1686
1687 if (is_gimple_min_invariant (expr))
1688 return NULL;
1689
1690 code = TREE_CODE (expr);
1691 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
1692 {
1693 n = TREE_OPERAND_LENGTH (expr);
1694 for (i = 0; i < n; i++)
1695 {
1696 iv = find_deriving_biv_for_expr (data, TREE_OPERAND (expr, i));
1697 if (iv)
1698 return iv;
1699 }
1700 }
1701
1702 /* Stop if it's not ssa name. */
1703 if (code != SSA_NAME)
1704 return NULL;
1705
1706 iv = get_iv (data, expr);
1707 if (!iv || integer_zerop (iv->step))
1708 return NULL;
1709 else if (iv->biv_p)
1710 return iv;
1711
1712 stmt = SSA_NAME_DEF_STMT (expr);
1713 if (gphi *phi = dyn_cast <gphi *> (stmt))
1714 {
1715 ssa_op_iter iter;
1716 use_operand_p use_p;
1717
1718 if (virtual_operand_p (gimple_phi_result (phi)))
1719 return NULL;
1720
1721 FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
1722 {
1723 tree use = USE_FROM_PTR (use_p);
1724 iv = find_deriving_biv_for_expr (data, use);
1725 if (iv)
1726 return iv;
1727 }
1728 return NULL;
1729 }
1730 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1731 return NULL;
1732
1733 e1 = gimple_assign_rhs1 (stmt);
1734 code = gimple_assign_rhs_code (stmt);
1735 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
1736 return find_deriving_biv_for_expr (data, e1);
1737
1738 switch (code)
1739 {
1740 case MULT_EXPR:
1741 case PLUS_EXPR:
1742 case MINUS_EXPR:
1743 case POINTER_PLUS_EXPR:
1744 /* Increments, decrements and multiplications by a constant
1745 are simple. */
1746 e2 = gimple_assign_rhs2 (stmt);
1747 iv = find_deriving_biv_for_expr (data, e2);
1748 if (iv)
1749 return iv;
1750
1751 /* Fallthru. */
1752 CASE_CONVERT:
1753 /* Casts are simple. */
1754 return find_deriving_biv_for_expr (data, e1);
1755
1756 default:
1757 break;
1758 }
1759
1760 return NULL;
1761 }
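
/* A small illustration: given

     i_1 = PHI <0, i_7>      <- the biv
     j_2 = i_1 * 4;
     p_3 = p_5 + j_2;

   asking for the biv deriving p_3 walks back through the definitions of
   j_2 and i_1 and returns the iv recorded for i_1.  */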
1762
1763 /* Record that BIV, together with its predecessor and successor ivs, is
1764    used in address type uses.  */
1765
1766 static void
1767 record_biv_for_address_use (struct ivopts_data *data, struct iv *biv)
1768 {
1769 unsigned i;
1770 tree type, base_1, base_2;
1771 bitmap_iterator bi;
1772
1773 if (!biv || !biv->biv_p || integer_zerop (biv->step)
1774 || biv->have_address_use || !biv->no_overflow)
1775 return;
1776
1777 type = TREE_TYPE (biv->base);
1778 if (!INTEGRAL_TYPE_P (type))
1779 return;
1780
1781 biv->have_address_use = true;
1782 data->bivs_not_used_in_addr--;
1783 base_1 = fold_build2 (PLUS_EXPR, type, biv->base, biv->step);
1784 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
1785 {
1786 struct iv *iv = ver_info (data, i)->iv;
1787
1788 if (!iv || !iv->biv_p || integer_zerop (iv->step)
1789 || iv->have_address_use || !iv->no_overflow)
1790 continue;
1791
1792 if (type != TREE_TYPE (iv->base)
1793 || !INTEGRAL_TYPE_P (TREE_TYPE (iv->base)))
1794 continue;
1795
1796 if (!operand_equal_p (biv->step, iv->step, 0))
1797 continue;
1798
1799 base_2 = fold_build2 (PLUS_EXPR, type, iv->base, iv->step);
1800 if (operand_equal_p (base_1, iv->base, 0)
1801 || operand_equal_p (base_2, biv->base, 0))
1802 {
1803 iv->have_address_use = true;
1804 data->bivs_not_used_in_addr--;
1805 }
1806 }
1807 }
1808
1809 /* Accumulates the steps of indices into DATA and replaces their values with the
1810 initial ones. Returns false when the value of the index cannot be determined.
1811 Callback for for_each_index. */
1812
1813 struct ifs_ivopts_data
1814 {
1815 struct ivopts_data *ivopts_data;
1816 gimple *stmt;
1817 tree step;
1818 };
1819
1820 static bool
1821 idx_find_step (tree base, tree *idx, void *data)
1822 {
1823 struct ifs_ivopts_data *dta = (struct ifs_ivopts_data *) data;
1824 struct iv *iv;
1825 bool use_overflow_semantics = false;
1826 tree step, iv_base, iv_step, lbound, off;
1827 struct loop *loop = dta->ivopts_data->current_loop;
1828
1829 /* If base is a component ref, require that the offset of the reference
1830 be invariant. */
1831 if (TREE_CODE (base) == COMPONENT_REF)
1832 {
1833 off = component_ref_field_offset (base);
1834 return expr_invariant_in_loop_p (loop, off);
1835 }
1836
1837 /* If base is array, first check whether we will be able to move the
1838 reference out of the loop (in order to take its address in strength
1839 reduction). In order for this to work we need both lower bound
1840 and step to be loop invariants. */
1841 if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
1842 {
1843 /* Moreover, for a range, the size needs to be invariant as well. */
1844 if (TREE_CODE (base) == ARRAY_RANGE_REF
1845 && !expr_invariant_in_loop_p (loop, TYPE_SIZE (TREE_TYPE (base))))
1846 return false;
1847
1848 step = array_ref_element_size (base);
1849 lbound = array_ref_low_bound (base);
1850
1851 if (!expr_invariant_in_loop_p (loop, step)
1852 || !expr_invariant_in_loop_p (loop, lbound))
1853 return false;
1854 }
1855
1856 if (TREE_CODE (*idx) != SSA_NAME)
1857 return true;
1858
1859 iv = get_iv (dta->ivopts_data, *idx);
1860 if (!iv)
1861 return false;
1862
1863 /* XXX We produce for a base of *D42 with iv->base being &x[0]
1864 *&x[0], which is not folded and does not trigger the
1865 ARRAY_REF path below. */
1866 *idx = iv->base;
1867
1868 if (integer_zerop (iv->step))
1869 return true;
1870
1871 if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
1872 {
1873 step = array_ref_element_size (base);
1874
1875 /* We only handle addresses whose step is an integer constant. */
1876 if (TREE_CODE (step) != INTEGER_CST)
1877 return false;
1878 }
1879 else
1880     /* The step for pointer arithmetic is already 1 byte.  */
1881 step = size_one_node;
1882
1883 iv_base = iv->base;
1884 iv_step = iv->step;
1885 if (iv->no_overflow && nowrap_type_p (TREE_TYPE (iv_step)))
1886 use_overflow_semantics = true;
1887
1888 if (!convert_affine_scev (dta->ivopts_data->current_loop,
1889 sizetype, &iv_base, &iv_step, dta->stmt,
1890 use_overflow_semantics))
1891 {
1892 /* The index might wrap. */
1893 return false;
1894 }
1895
1896 step = fold_build2 (MULT_EXPR, sizetype, step, iv_step);
1897 dta->step = fold_build2 (PLUS_EXPR, sizetype, dta->step, step);
1898
1899 if (dta->ivopts_data->bivs_not_used_in_addr)
1900 {
1901 if (!iv->biv_p)
1902 iv = find_deriving_biv_for_expr (dta->ivopts_data, iv->ssa_name);
1903
1904 record_biv_for_address_use (dta->ivopts_data, iv);
1905 }
1906 return true;
1907 }
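
/* Illustration: for a reference a[i_1] with 4-byte elements where i_1
   advances by 1 each iteration, the callback replaces the index with the
   initial value of i_1 and adds 4 * 1 to DTA->step, so that once
   for_each_index has finished the caller knows the address as a whole
   advances by 4 bytes per iteration.  */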
1908
1909 /* Records use in index IDX. Callback for for_each_index. Ivopts data
1910 object is passed to it in DATA. */
1911
1912 static bool
1913 idx_record_use (tree base, tree *idx,
1914 void *vdata)
1915 {
1916 struct ivopts_data *data = (struct ivopts_data *) vdata;
1917 find_interesting_uses_op (data, *idx);
1918 if (TREE_CODE (base) == ARRAY_REF || TREE_CODE (base) == ARRAY_RANGE_REF)
1919 {
1920 find_interesting_uses_op (data, array_ref_element_size (base));
1921 find_interesting_uses_op (data, array_ref_low_bound (base));
1922 }
1923 return true;
1924 }
1925
1926 /* If we can prove that TOP = cst * BOT for some constant cst,
1927 store cst to MUL and return true. Otherwise return false.
1928 The returned value is always sign-extended, regardless of the
1929 signedness of TOP and BOT. */
1930
1931 static bool
1932 constant_multiple_of (tree top, tree bot, widest_int *mul)
1933 {
1934 tree mby;
1935 enum tree_code code;
1936 unsigned precision = TYPE_PRECISION (TREE_TYPE (top));
1937 widest_int res, p0, p1;
1938
1939 STRIP_NOPS (top);
1940 STRIP_NOPS (bot);
1941
1942 if (operand_equal_p (top, bot, 0))
1943 {
1944 *mul = 1;
1945 return true;
1946 }
1947
1948 code = TREE_CODE (top);
1949 switch (code)
1950 {
1951 case MULT_EXPR:
1952 mby = TREE_OPERAND (top, 1);
1953 if (TREE_CODE (mby) != INTEGER_CST)
1954 return false;
1955
1956 if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &res))
1957 return false;
1958
1959 *mul = wi::sext (res * wi::to_widest (mby), precision);
1960 return true;
1961
1962 case PLUS_EXPR:
1963 case MINUS_EXPR:
1964 if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &p0)
1965 || !constant_multiple_of (TREE_OPERAND (top, 1), bot, &p1))
1966 return false;
1967
1968 if (code == MINUS_EXPR)
1969 p1 = -p1;
1970 *mul = wi::sext (p0 + p1, precision);
1971 return true;
1972
1973 case INTEGER_CST:
1974 if (TREE_CODE (bot) != INTEGER_CST)
1975 return false;
1976
1977 p0 = widest_int::from (top, SIGNED);
1978 p1 = widest_int::from (bot, SIGNED);
1979 if (p1 == 0)
1980 return false;
1981 *mul = wi::sext (wi::divmod_trunc (p0, p1, SIGNED, &res), precision);
1982 return res == 0;
1983
1984 default:
1985 return false;
1986 }
1987 }
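
/* Some illustrative results (a sketch; i_1 is an SSA name):

     constant_multiple_of (i_1 * 12, i_1, &mul)           => true, mul = 12
     constant_multiple_of (i_1 * 8 - i_1 * 4, i_1, &mul)  => true, mul = 4
     constant_multiple_of (24, 8, &mul)                   => true, mul = 3
     constant_multiple_of (i_1 + 7, i_1, &mul)            => false

   The last case fails because the constant 7 is not a multiple of i_1.  */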
1988
1989 /* Return true if memory reference REF with step STEP may be unaligned. */
1990
1991 static bool
1992 may_be_unaligned_p (tree ref, tree step)
1993 {
1994 /* TARGET_MEM_REFs are translated directly to valid MEMs on the target,
1995 thus they are not misaligned. */
1996 if (TREE_CODE (ref) == TARGET_MEM_REF)
1997 return false;
1998
1999 unsigned int align = TYPE_ALIGN (TREE_TYPE (ref));
2000 if (GET_MODE_ALIGNMENT (TYPE_MODE (TREE_TYPE (ref))) > align)
2001 align = GET_MODE_ALIGNMENT (TYPE_MODE (TREE_TYPE (ref)));
2002
2003 unsigned HOST_WIDE_INT bitpos;
2004 unsigned int ref_align;
2005 get_object_alignment_1 (ref, &ref_align, &bitpos);
2006 if (ref_align < align
2007 || (bitpos % align) != 0
2008 || (bitpos % BITS_PER_UNIT) != 0)
2009 return true;
2010
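/* For example, if ALIGN is 32 bits but STEP is only known to be a multiple
   of 2 bytes (one trailing zero bit), successive references can land on
   2-byte boundaries, so the access may become unaligned.  */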
2011 unsigned int trailing_zeros = tree_ctz (step);
2012 if (trailing_zeros < HOST_BITS_PER_INT
2013 && (1U << trailing_zeros) * BITS_PER_UNIT < align)
2014 return true;
2015
2016 return false;
2017 }
2018
2019 /* Return true if EXPR may be non-addressable. */
2020
2021 bool
2022 may_be_nonaddressable_p (tree expr)
2023 {
2024 switch (TREE_CODE (expr))
2025 {
2026 case TARGET_MEM_REF:
2027 /* TARGET_MEM_REFs are translated directly to valid MEMs on the
2028 target, thus they are always addressable. */
2029 return false;
2030
2031 case MEM_REF:
2032 /* Likewise for MEM_REFs, modulo the storage order. */
2033 return REF_REVERSE_STORAGE_ORDER (expr);
2034
2035 case BIT_FIELD_REF:
2036 if (REF_REVERSE_STORAGE_ORDER (expr))
2037 return true;
2038 return may_be_nonaddressable_p (TREE_OPERAND (expr, 0));
2039
2040 case COMPONENT_REF:
2041 if (TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_OPERAND (expr, 0))))
2042 return true;
2043 return DECL_NONADDRESSABLE_P (TREE_OPERAND (expr, 1))
2044 || may_be_nonaddressable_p (TREE_OPERAND (expr, 0));
2045
2046 case ARRAY_REF:
2047 case ARRAY_RANGE_REF:
2048 if (TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_OPERAND (expr, 0))))
2049 return true;
2050 return may_be_nonaddressable_p (TREE_OPERAND (expr, 0));
2051
2052 case VIEW_CONVERT_EXPR:
2053 /* View-conversions of this kind may wrap non-addressable objects
2054 and make them look addressable. After some processing the
2055 non-addressability may be uncovered again, causing ADDR_EXPRs
2056 of inappropriate objects to be built. */
2057 if (is_gimple_reg (TREE_OPERAND (expr, 0))
2058 || !is_gimple_addressable (TREE_OPERAND (expr, 0)))
2059 return true;
2060 return may_be_nonaddressable_p (TREE_OPERAND (expr, 0));
2061
2062 CASE_CONVERT:
2063 return true;
2064
2065 default:
2066 break;
2067 }
2068
2069 return false;
2070 }
2071
2072 static tree
2073 strip_offset (tree expr, unsigned HOST_WIDE_INT *offset);
2074
2075 /* Record a use of type USE_TYPE at *USE_P in STMT whose value is IV.
2076 If there is an existing use which has the same stripped iv base and step,
2077 this function records this one as a sub use of that one; otherwise it records
2078 it as a normal one. */
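/* As an example, accesses *(p + i * 4 + 8) and *(p + i * 4 + 16) share the
   stripped base p + i * 4, the same base object and the same step, and differ
   only in the constant offset (8 vs. 16), so the second one is recorded as a
   sub use of the first.  */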
2079
2080 static struct iv_use *
2081 record_group_use (struct ivopts_data *data, tree *use_p,
2082 struct iv *iv, gimple *stmt, enum use_type use_type)
2083 {
2084 unsigned int i;
2085 struct iv_use *use;
2086 tree addr_base;
2087 unsigned HOST_WIDE_INT addr_offset;
2088
2089 /* Only support sub uses for address type uses, that is, uses with a base
2090 object. */
2091 if (!iv->base_object)
2092 return record_use (data, use_p, iv, stmt, use_type);
2093
2094 addr_base = strip_offset (iv->base, &addr_offset);
2095 for (i = 0; i < n_iv_uses (data); i++)
2096 {
2097 use = iv_use (data, i);
2098 if (use->type != USE_ADDRESS || !use->iv->base_object)
2099 continue;
2100
2101 /* Check if it has the same stripped base and step. */
2102 if (operand_equal_p (iv->base_object, use->iv->base_object, 0)
2103 && operand_equal_p (iv->step, use->iv->step, 0)
2104 && operand_equal_p (addr_base, use->addr_base, 0))
2105 break;
2106 }
2107
2108 if (i == n_iv_uses (data))
2109 return record_use (data, use_p, iv, stmt,
2110 use_type, addr_base, addr_offset);
2111 else
2112 return record_sub_use (data, use_p, iv, stmt,
2113 use_type, addr_base, addr_offset, i);
2114 }
2115
2116 /* Finds addresses in *OP_P inside STMT. */
2117
2118 static void
2119 find_interesting_uses_address (struct ivopts_data *data, gimple *stmt,
2120 tree *op_p)
2121 {
2122 tree base = *op_p, step = size_zero_node;
2123 struct iv *civ;
2124 struct ifs_ivopts_data ifs_ivopts_data;
2125
2126 /* Do not play with volatile memory references. A bit too conservative,
2127 perhaps, but safe. */
2128 if (gimple_has_volatile_ops (stmt))
2129 goto fail;
2130
2131 /* Ignore bitfields for now. Not really something terribly complicated
2132 to handle. TODO. */
2133 if (TREE_CODE (base) == BIT_FIELD_REF)
2134 goto fail;
2135
2136 base = unshare_expr (base);
2137
2138 if (TREE_CODE (base) == TARGET_MEM_REF)
2139 {
2140 tree type = build_pointer_type (TREE_TYPE (base));
2141 tree astep;
2142
2143 if (TMR_BASE (base)
2144 && TREE_CODE (TMR_BASE (base)) == SSA_NAME)
2145 {
2146 civ = get_iv (data, TMR_BASE (base));
2147 if (!civ)
2148 goto fail;
2149
2150 TMR_BASE (base) = civ->base;
2151 step = civ->step;
2152 }
2153 if (TMR_INDEX2 (base)
2154 && TREE_CODE (TMR_INDEX2 (base)) == SSA_NAME)
2155 {
2156 civ = get_iv (data, TMR_INDEX2 (base));
2157 if (!civ)
2158 goto fail;
2159
2160 TMR_INDEX2 (base) = civ->base;
2161 step = civ->step;
2162 }
2163 if (TMR_INDEX (base)
2164 && TREE_CODE (TMR_INDEX (base)) == SSA_NAME)
2165 {
2166 civ = get_iv (data, TMR_INDEX (base));
2167 if (!civ)
2168 goto fail;
2169
2170 TMR_INDEX (base) = civ->base;
2171 astep = civ->step;
2172
2173 if (astep)
2174 {
2175 if (TMR_STEP (base))
2176 astep = fold_build2 (MULT_EXPR, type, TMR_STEP (base), astep);
2177
2178 step = fold_build2 (PLUS_EXPR, type, step, astep);
2179 }
2180 }
2181
2182 if (integer_zerop (step))
2183 goto fail;
2184 base = tree_mem_ref_addr (type, base);
2185 }
2186 else
2187 {
2188 ifs_ivopts_data.ivopts_data = data;
2189 ifs_ivopts_data.stmt = stmt;
2190 ifs_ivopts_data.step = size_zero_node;
2191 if (!for_each_index (&base, idx_find_step, &ifs_ivopts_data)
2192 || integer_zerop (ifs_ivopts_data.step))
2193 goto fail;
2194 step = ifs_ivopts_data.step;
2195
2196 /* Check that the base expression is addressable. This needs
2197 to be done after substituting bases of IVs into it. */
2198 if (may_be_nonaddressable_p (base))
2199 goto fail;
2200
2201 /* Moreover, on strict alignment platforms, check that it is
2202 sufficiently aligned. */
2203 if (STRICT_ALIGNMENT && may_be_unaligned_p (base, step))
2204 goto fail;
2205
2206 base = build_fold_addr_expr (base);
2207
2208 /* Substituting bases of IVs into the base expression might
2209 have caused folding opportunities. */
2210 if (TREE_CODE (base) == ADDR_EXPR)
2211 {
2212 tree *ref = &TREE_OPERAND (base, 0);
2213 while (handled_component_p (*ref))
2214 ref = &TREE_OPERAND (*ref, 0);
2215 if (TREE_CODE (*ref) == MEM_REF)
2216 {
2217 tree tem = fold_binary (MEM_REF, TREE_TYPE (*ref),
2218 TREE_OPERAND (*ref, 0),
2219 TREE_OPERAND (*ref, 1));
2220 if (tem)
2221 *ref = tem;
2222 }
2223 }
2224 }
2225
2226 civ = alloc_iv (data, base, step);
2227 record_group_use (data, op_p, civ, stmt, USE_ADDRESS);
2228 return;
2229
2230 fail:
2231 for_each_index (op_p, idx_record_use, data);
2232 }
2233
2234 /* Finds and records invariants used in STMT. */
2235
2236 static void
2237 find_invariants_stmt (struct ivopts_data *data, gimple *stmt)
2238 {
2239 ssa_op_iter iter;
2240 use_operand_p use_p;
2241 tree op;
2242
2243 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
2244 {
2245 op = USE_FROM_PTR (use_p);
2246 record_invariant (data, op, false);
2247 }
2248 }
2249
2250 /* Finds interesting uses of induction variables in the statement STMT. */
2251
2252 static void
2253 find_interesting_uses_stmt (struct ivopts_data *data, gimple *stmt)
2254 {
2255 struct iv *iv;
2256 tree op, *lhs, *rhs;
2257 ssa_op_iter iter;
2258 use_operand_p use_p;
2259 enum tree_code code;
2260
2261 find_invariants_stmt (data, stmt);
2262
2263 if (gimple_code (stmt) == GIMPLE_COND)
2264 {
2265 find_interesting_uses_cond (data, stmt);
2266 return;
2267 }
2268
2269 if (is_gimple_assign (stmt))
2270 {
2271 lhs = gimple_assign_lhs_ptr (stmt);
2272 rhs = gimple_assign_rhs1_ptr (stmt);
2273
2274 if (TREE_CODE (*lhs) == SSA_NAME)
2275 {
2276 /* If the statement defines an induction variable, the uses are not
2277 interesting by themselves. */
2278
2279 iv = get_iv (data, *lhs);
2280
2281 if (iv && !integer_zerop (iv->step))
2282 return;
2283 }
2284
2285 code = gimple_assign_rhs_code (stmt);
2286 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
2287 && (REFERENCE_CLASS_P (*rhs)
2288 || is_gimple_val (*rhs)))
2289 {
2290 if (REFERENCE_CLASS_P (*rhs))
2291 find_interesting_uses_address (data, stmt, rhs);
2292 else
2293 find_interesting_uses_op (data, *rhs);
2294
2295 if (REFERENCE_CLASS_P (*lhs))
2296 find_interesting_uses_address (data, stmt, lhs);
2297 return;
2298 }
2299 else if (TREE_CODE_CLASS (code) == tcc_comparison)
2300 {
2301 find_interesting_uses_cond (data, stmt);
2302 return;
2303 }
2304
2305 /* TODO -- we should also handle address uses of type
2306
2307 memory = call (whatever);
2308
2309 and
2310
2311 call (memory). */
2312 }
2313
2314 if (gimple_code (stmt) == GIMPLE_PHI
2315 && gimple_bb (stmt) == data->current_loop->header)
2316 {
2317 iv = get_iv (data, PHI_RESULT (stmt));
2318
2319 if (iv && !integer_zerop (iv->step))
2320 return;
2321 }
2322
2323 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
2324 {
2325 op = USE_FROM_PTR (use_p);
2326
2327 if (TREE_CODE (op) != SSA_NAME)
2328 continue;
2329
2330 iv = get_iv (data, op);
2331 if (!iv)
2332 continue;
2333
2334 find_interesting_uses_op (data, op);
2335 }
2336 }
2337
2338 /* Finds interesting uses of induction variables outside of loops
2339 on loop exit edge EXIT. */
2340
2341 static void
2342 find_interesting_uses_outside (struct ivopts_data *data, edge exit)
2343 {
2344 gphi *phi;
2345 gphi_iterator psi;
2346 tree def;
2347
2348 for (psi = gsi_start_phis (exit->dest); !gsi_end_p (psi); gsi_next (&psi))
2349 {
2350 phi = psi.phi ();
2351 def = PHI_ARG_DEF_FROM_EDGE (phi, exit);
2352 if (!virtual_operand_p (def))
2353 find_interesting_uses_op (data, def);
2354 }
2355 }
2356
2357 /* Finds uses of the induction variables that are interesting. */
2358
2359 static void
2360 find_interesting_uses (struct ivopts_data *data)
2361 {
2362 basic_block bb;
2363 gimple_stmt_iterator bsi;
2364 basic_block *body = get_loop_body (data->current_loop);
2365 unsigned i;
2366 struct version_info *info;
2367 edge e;
2368
2369 if (dump_file && (dump_flags & TDF_DETAILS))
2370 fprintf (dump_file, "Uses:\n\n");
2371
2372 for (i = 0; i < data->current_loop->num_nodes; i++)
2373 {
2374 edge_iterator ei;
2375 bb = body[i];
2376
2377 FOR_EACH_EDGE (e, ei, bb->succs)
2378 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
2379 && !flow_bb_inside_loop_p (data->current_loop, e->dest))
2380 find_interesting_uses_outside (data, e);
2381
2382 for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2383 find_interesting_uses_stmt (data, gsi_stmt (bsi));
2384 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2385 if (!is_gimple_debug (gsi_stmt (bsi)))
2386 find_interesting_uses_stmt (data, gsi_stmt (bsi));
2387 }
2388
2389 if (dump_file && (dump_flags & TDF_DETAILS))
2390 {
2391 bitmap_iterator bi;
2392
2393 fprintf (dump_file, "\n");
2394
2395 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
2396 {
2397 info = ver_info (data, i);
2398 if (info->inv_id)
2399 {
2400 fprintf (dump_file, " ");
2401 print_generic_expr (dump_file, info->name, TDF_SLIM);
2402 fprintf (dump_file, " is invariant (%d)%s\n",
2403 info->inv_id, info->has_nonlin_use ? "" : ", eliminable");
2404 }
2405 }
2406
2407 fprintf (dump_file, "\n");
2408 }
2409
2410 free (body);
2411 }
2412
2413 /* Compute the maximum offset of the [base + offset] addressing mode
2414 for the memory reference represented by USE. */
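/* For instance, on a target whose [base + offset] form accepts 12-bit signed
   offsets, the probing loop below stops at i = 11 and caches 2047 as the
   maximum offset for this address space and memory mode.  */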
2415
2416 static HOST_WIDE_INT
2417 compute_max_addr_offset (struct iv_use *use)
2418 {
2419 int width;
2420 rtx reg, addr;
2421 HOST_WIDE_INT i, off;
2422 unsigned list_index, num;
2423 addr_space_t as;
2424 machine_mode mem_mode, addr_mode;
2425 static vec<HOST_WIDE_INT> max_offset_list;
2426
2427 as = TYPE_ADDR_SPACE (TREE_TYPE (use->iv->base));
2428 mem_mode = TYPE_MODE (TREE_TYPE (*use->op_p));
2429
2430 num = max_offset_list.length ();
2431 list_index = (unsigned) as * MAX_MACHINE_MODE + (unsigned) mem_mode;
2432 if (list_index >= num)
2433 {
2434 max_offset_list.safe_grow (list_index + MAX_MACHINE_MODE);
2435 for (; num < max_offset_list.length (); num++)
2436 max_offset_list[num] = -1;
2437 }
2438
2439 off = max_offset_list[list_index];
2440 if (off != -1)
2441 return off;
2442
2443 addr_mode = targetm.addr_space.address_mode (as);
2444 reg = gen_raw_REG (addr_mode, LAST_VIRTUAL_REGISTER + 1);
2445 addr = gen_rtx_fmt_ee (PLUS, addr_mode, reg, NULL_RTX);
2446
2447 width = GET_MODE_BITSIZE (addr_mode) - 1;
2448 if (width > (HOST_BITS_PER_WIDE_INT - 1))
2449 width = HOST_BITS_PER_WIDE_INT - 1;
2450
2451 for (i = width; i > 0; i--)
2452 {
2453 off = ((unsigned HOST_WIDE_INT) 1 << i) - 1;
2454 XEXP (addr, 1) = gen_int_mode (off, addr_mode);
2455 if (memory_address_addr_space_p (mem_mode, addr, as))
2456 break;
2457
2458 /* For some strict-alignment targets, the offset must be naturally
2459 aligned. Try an aligned offset if mem_mode is not QImode. */
2460 off = ((unsigned HOST_WIDE_INT) 1 << i);
2461 if (off > GET_MODE_SIZE (mem_mode) && mem_mode != QImode)
2462 {
2463 off -= GET_MODE_SIZE (mem_mode);
2464 XEXP (addr, 1) = gen_int_mode (off, addr_mode);
2465 if (memory_address_addr_space_p (mem_mode, addr, as))
2466 break;
2467 }
2468 }
2469 if (i == 0)
2470 off = 0;
2471
2472 max_offset_list[list_index] = off;
2473 return off;
2474 }
2475
2476 /* Check if all small groups should be split. Return true if and
2477 only if:
2478
2479 1) At least one group contains two uses with different offsets.
2480 2) No group contains more than two uses with different offsets.
2481
2482 Return false otherwise. We want to split such groups because:
2483
2484 1) Small groups don't have much benefit and may interfere with
2485 general candidate selection.
2486 2) The problem size with only small groups is usually small, and
2487 the general algorithm can handle it well.
2488
2489 TODO -- Above claim may not hold when auto increment is supported. */
2490
2491 static bool
2492 split_all_small_groups (struct ivopts_data *data)
2493 {
2494 bool split_p = false;
2495 unsigned int i, n, distinct;
2496 struct iv_use *pre, *use;
2497
2498 n = n_iv_uses (data);
2499 for (i = 0; i < n; i++)
2500 {
2501 use = iv_use (data, i);
2502 if (!use->next)
2503 continue;
2504
2505 distinct = 1;
2506 gcc_assert (use->type == USE_ADDRESS);
2507 for (pre = use, use = use->next; use; pre = use, use = use->next)
2508 {
2509 if (pre->addr_offset != use->addr_offset)
2510 distinct++;
2511
2512 if (distinct > 2)
2513 return false;
2514 }
2515 if (distinct == 2)
2516 split_p = true;
2517 }
2518
2519 return split_p;
2520 }
2521
2522 /* For each group of address type uses, this function further groups
2523 these uses according to the maximum offset supported by the target's
2524 [base + offset] addressing mode. */
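/* For example, uses at offsets 0, 8 and 4096 from the same stripped base,
   with a maximum supported offset of 4095, end up in two groups: one holding
   the uses at 0 and 8, and a new group starting at the use at 4096.  */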
2525
2526 static void
2527 group_address_uses (struct ivopts_data *data)
2528 {
2529 HOST_WIDE_INT max_offset = -1;
2530 unsigned int i, n, sub_id;
2531 struct iv_use *pre, *use;
2532 unsigned HOST_WIDE_INT addr_offset_first;
2533
2534 /* Reset max offset to split all small groups. */
2535 if (split_all_small_groups (data))
2536 max_offset = 0;
2537
2538 n = n_iv_uses (data);
2539 for (i = 0; i < n; i++)
2540 {
2541 use = iv_use (data, i);
2542 if (!use->next)
2543 continue;
2544
2545 gcc_assert (use->type == USE_ADDRESS);
2546 if (max_offset != 0)
2547 max_offset = compute_max_addr_offset (use);
2548
2549 while (use)
2550 {
2551 sub_id = 0;
2552 addr_offset_first = use->addr_offset;
2553 /* Only uses whose offset relative to the first use fits in the
2554 offset part can be grouped together. */
2555 for (pre = use, use = use->next;
2556 use && (use->addr_offset - addr_offset_first
2557 <= (unsigned HOST_WIDE_INT) max_offset);
2558 pre = use, use = use->next)
2559 {
2560 use->id = pre->id;
2561 use->sub_id = ++sub_id;
2562 }
2563
2564 /* Break the list and create new group. */
2565 if (use)
2566 {
2567 pre->next = NULL;
2568 use->id = n_iv_uses (data);
2569 use->related_cands = BITMAP_ALLOC (NULL);
2570 data->iv_uses.safe_push (use);
2571 }
2572 }
2573 }
2574
2575 if (dump_file && (dump_flags & TDF_DETAILS))
2576 dump_uses (dump_file, data);
2577 }
2578
2579 /* Strips constant offsets from EXPR and stores them to OFFSET. If INSIDE_ADDR
2580 is true, assume we are inside an address. If TOP_COMPREF is true, assume
2581 we are at the top-level of the processed address. */
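/* For instance, given &a[i + 3] with 4-byte array elements, the recursion
   strips the constant part of the index and returns &a[i] with *OFFSET set
   to 12.  */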
2582
2583 static tree
2584 strip_offset_1 (tree expr, bool inside_addr, bool top_compref,
2585 HOST_WIDE_INT *offset)
2586 {
2587 tree op0 = NULL_TREE, op1 = NULL_TREE, tmp, step;
2588 enum tree_code code;
2589 tree type, orig_type = TREE_TYPE (expr);
2590 HOST_WIDE_INT off0, off1, st;
2591 tree orig_expr = expr;
2592
2593 STRIP_NOPS (expr);
2594
2595 type = TREE_TYPE (expr);
2596 code = TREE_CODE (expr);
2597 *offset = 0;
2598
2599 switch (code)
2600 {
2601 case INTEGER_CST:
2602 if (!cst_and_fits_in_hwi (expr)
2603 || integer_zerop (expr))
2604 return orig_expr;
2605
2606 *offset = int_cst_value (expr);
2607 return build_int_cst (orig_type, 0);
2608
2609 case POINTER_PLUS_EXPR:
2610 case PLUS_EXPR:
2611 case MINUS_EXPR:
2612 op0 = TREE_OPERAND (expr, 0);
2613 op1 = TREE_OPERAND (expr, 1);
2614
2615 op0 = strip_offset_1 (op0, false, false, &off0);
2616 op1 = strip_offset_1 (op1, false, false, &off1);
2617
2618 *offset = (code == MINUS_EXPR ? off0 - off1 : off0 + off1);
2619 if (op0 == TREE_OPERAND (expr, 0)
2620 && op1 == TREE_OPERAND (expr, 1))
2621 return orig_expr;
2622
2623 if (integer_zerop (op1))
2624 expr = op0;
2625 else if (integer_zerop (op0))
2626 {
2627 if (code == MINUS_EXPR)
2628 expr = fold_build1 (NEGATE_EXPR, type, op1);
2629 else
2630 expr = op1;
2631 }
2632 else
2633 expr = fold_build2 (code, type, op0, op1);
2634
2635 return fold_convert (orig_type, expr);
2636
2637 case MULT_EXPR:
2638 op1 = TREE_OPERAND (expr, 1);
2639 if (!cst_and_fits_in_hwi (op1))
2640 return orig_expr;
2641
2642 op0 = TREE_OPERAND (expr, 0);
2643 op0 = strip_offset_1 (op0, false, false, &off0);
2644 if (op0 == TREE_OPERAND (expr, 0))
2645 return orig_expr;
2646
2647 *offset = off0 * int_cst_value (op1);
2648 if (integer_zerop (op0))
2649 expr = op0;
2650 else
2651 expr = fold_build2 (MULT_EXPR, type, op0, op1);
2652
2653 return fold_convert (orig_type, expr);
2654
2655 case ARRAY_REF:
2656 case ARRAY_RANGE_REF:
2657 if (!inside_addr)
2658 return orig_expr;
2659
2660 step = array_ref_element_size (expr);
2661 if (!cst_and_fits_in_hwi (step))
2662 break;
2663
2664 st = int_cst_value (step);
2665 op1 = TREE_OPERAND (expr, 1);
2666 op1 = strip_offset_1 (op1, false, false, &off1);
2667 *offset = off1 * st;
2668
2669 if (top_compref
2670 && integer_zerop (op1))
2671 {
2672 /* Strip the component reference completely. */
2673 op0 = TREE_OPERAND (expr, 0);
2674 op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
2675 *offset += off0;
2676 return op0;
2677 }
2678 break;
2679
2680 case COMPONENT_REF:
2681 {
2682 tree field;
2683
2684 if (!inside_addr)
2685 return orig_expr;
2686
2687 tmp = component_ref_field_offset (expr);
2688 field = TREE_OPERAND (expr, 1);
2689 if (top_compref
2690 && cst_and_fits_in_hwi (tmp)
2691 && cst_and_fits_in_hwi (DECL_FIELD_BIT_OFFSET (field)))
2692 {
2693 HOST_WIDE_INT boffset, abs_off;
2694
2695 /* Strip the component reference completely. */
2696 op0 = TREE_OPERAND (expr, 0);
2697 op0 = strip_offset_1 (op0, inside_addr, top_compref, &off0);
2698 boffset = int_cst_value (DECL_FIELD_BIT_OFFSET (field));
2699 abs_off = abs_hwi (boffset) / BITS_PER_UNIT;
2700 if (boffset < 0)
2701 abs_off = -abs_off;
2702
2703 *offset = off0 + int_cst_value (tmp) + abs_off;
2704 return op0;
2705 }
2706 }
2707 break;
2708
2709 case ADDR_EXPR:
2710 op0 = TREE_OPERAND (expr, 0);
2711 op0 = strip_offset_1 (op0, true, true, &off0);
2712 *offset += off0;
2713
2714 if (op0 == TREE_OPERAND (expr, 0))
2715 return orig_expr;
2716
2717 expr = build_fold_addr_expr (op0);
2718 return fold_convert (orig_type, expr);
2719
2720 case MEM_REF:
2721 /* ??? Offset operand? */
2722 inside_addr = false;
2723 break;
2724
2725 default:
2726 return orig_expr;
2727 }
2728
2729 /* Default handling of expressions for which we want to recurse into
2730 the first operand. */
2731 op0 = TREE_OPERAND (expr, 0);
2732 op0 = strip_offset_1 (op0, inside_addr, false, &off0);
2733 *offset += off0;
2734
2735 if (op0 == TREE_OPERAND (expr, 0)
2736 && (!op1 || op1 == TREE_OPERAND (expr, 1)))
2737 return orig_expr;
2738
2739 expr = copy_node (expr);
2740 TREE_OPERAND (expr, 0) = op0;
2741 if (op1)
2742 TREE_OPERAND (expr, 1) = op1;
2743
2744 /* Inside an address, we might strip the top-level component references,
2745 thus changing the type of the expression. Handling of ADDR_EXPR
2746 will fix that. */
2747 expr = fold_convert (orig_type, expr);
2748
2749 return expr;
2750 }
2751
2752 /* Strips constant offsets from EXPR and stores them to OFFSET. */
2753
2754 static tree
2755 strip_offset (tree expr, unsigned HOST_WIDE_INT *offset)
2756 {
2757 HOST_WIDE_INT off;
2758 tree core = strip_offset_1 (expr, false, false, &off);
2759 *offset = off;
2760 return core;
2761 }
2762
2763 /* Returns a variant of TYPE that can be used as a base for different uses.
2764 We return an unsigned type with the same precision, which avoids problems
2765 with overflows. */
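/* For example, a pointer base such as int *p is computed in an unsigned
   integer type of the same precision as the pointer, and a signed int IV
   in unsigned int.  */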
2766
2767 static tree
2768 generic_type_for (tree type)
2769 {
2770 if (POINTER_TYPE_P (type))
2771 return unsigned_type_for (type);
2772
2773 if (TYPE_UNSIGNED (type))
2774 return type;
2775
2776 return unsigned_type_for (type);
2777 }
2778
2779 /* Records invariants in *EXPR_P. Callback for walk_tree. DATA contains
2780 the bitmap in which we should store them. */
2781
2782 static struct ivopts_data *fd_ivopts_data;
2783 static tree
2784 find_depends (tree *expr_p, int *ws ATTRIBUTE_UNUSED, void *data)
2785 {
2786 bitmap *depends_on = (bitmap *) data;
2787 struct version_info *info;
2788
2789 if (TREE_CODE (*expr_p) != SSA_NAME)
2790 return NULL_TREE;
2791 info = name_info (fd_ivopts_data, *expr_p);
2792
2793 if (!info->inv_id || info->has_nonlin_use)
2794 return NULL_TREE;
2795
2796 if (!*depends_on)
2797 *depends_on = BITMAP_ALLOC (NULL);
2798 bitmap_set_bit (*depends_on, info->inv_id);
2799
2800 return NULL_TREE;
2801 }
2802
2803 /* Adds a candidate BASE + STEP * i. Important field is set to IMPORTANT and
2804 position to POS. If USE is not NULL, the candidate is set as related to
2805 it. If both BASE and STEP are NULL, we add a pseudocandidate for the
2806 replacement of the final value of the iv by a direct computation. */
2807
2808 static struct iv_cand *
2809 add_candidate_1 (struct ivopts_data *data,
2810 tree base, tree step, bool important, enum iv_position pos,
2811 struct iv_use *use, gimple *incremented_at,
2812 struct iv *orig_iv = NULL)
2813 {
2814 unsigned i;
2815 struct iv_cand *cand = NULL;
2816 tree type, orig_type;
2817
2818 /* -fkeep-gc-roots-live means that we have to keep a real pointer
2819 live, but the ivopts code may replace a real pointer with one
2820 pointing before or after the memory block that is then adjusted
2821 into the memory block during the loop. FIXME: It would likely be
2822 better to actually force the pointer live and still use ivopts;
2823 for example, it would be enough to write the pointer into memory
2824 and keep it there until after the loop. */
2825 if (flag_keep_gc_roots_live && POINTER_TYPE_P (TREE_TYPE (base)))
2826 return NULL;
2827
2828 /* For non-original variables, make sure their values are computed in a type
2829 that does not invoke undefined behavior on overflows (since in general,
2830 we cannot prove that these induction variables are non-wrapping). */
2831 if (pos != IP_ORIGINAL)
2832 {
2833 orig_type = TREE_TYPE (base);
2834 type = generic_type_for (orig_type);
2835 if (type != orig_type)
2836 {
2837 base = fold_convert (type, base);
2838 step = fold_convert (type, step);
2839 }
2840 }
2841
2842 for (i = 0; i < n_iv_cands (data); i++)
2843 {
2844 cand = iv_cand (data, i);
2845
2846 if (cand->pos != pos)
2847 continue;
2848
2849 if (cand->incremented_at != incremented_at
2850 || ((pos == IP_AFTER_USE || pos == IP_BEFORE_USE)
2851 && cand->ainc_use != use))
2852 continue;
2853
2854 if (!cand->iv)
2855 {
2856 if (!base && !step)
2857 break;
2858
2859 continue;
2860 }
2861
2862 if (!base && !step)
2863 continue;
2864
2865 if (operand_equal_p (base, cand->iv->base, 0)
2866 && operand_equal_p (step, cand->iv->step, 0)
2867 && (TYPE_PRECISION (TREE_TYPE (base))
2868 == TYPE_PRECISION (TREE_TYPE (cand->iv->base))))
2869 break;
2870 }
2871
2872 if (i == n_iv_cands (data))
2873 {
2874 cand = XCNEW (struct iv_cand);
2875 cand->id = i;
2876
2877 if (!base && !step)
2878 cand->iv = NULL;
2879 else
2880 cand->iv = alloc_iv (data, base, step);
2881
2882 cand->pos = pos;
2883 if (pos != IP_ORIGINAL && cand->iv)
2884 {
2885 cand->var_before = create_tmp_var_raw (TREE_TYPE (base), "ivtmp");
2886 cand->var_after = cand->var_before;
2887 }
2888 cand->important = important;
2889 cand->incremented_at = incremented_at;
2890 data->iv_candidates.safe_push (cand);
2891
2892 if (step
2893 && TREE_CODE (step) != INTEGER_CST)
2894 {
2895 fd_ivopts_data = data;
2896 walk_tree (&step, find_depends, &cand->depends_on, NULL);
2897 }
2898
2899 if (pos == IP_AFTER_USE || pos == IP_BEFORE_USE)
2900 cand->ainc_use = use;
2901 else
2902 cand->ainc_use = NULL;
2903
2904 cand->orig_iv = orig_iv;
2905 if (dump_file && (dump_flags & TDF_DETAILS))
2906 dump_cand (dump_file, cand);
2907 }
2908
2909 if (important && !cand->important)
2910 {
2911 cand->important = true;
2912 if (dump_file && (dump_flags & TDF_DETAILS))
2913 fprintf (dump_file, "Candidate %d is important\n", cand->id);
2914 }
2915
2916 if (use)
2917 {
2918 bitmap_set_bit (use->related_cands, i);
2919 if (dump_file && (dump_flags & TDF_DETAILS))
2920 fprintf (dump_file, "Candidate %d is related to use %d\n",
2921 cand->id, use->id);
2922 }
2923
2924 return cand;
2925 }
2926
2927 /* Returns true if incrementing the induction variable at the end of the LOOP
2928 is allowed.
2929
2930 The purpose is to avoid splitting the latch edge with a biv increment, thus
2931 creating a jump, possibly confusing other optimization passes and leaving
2932 less freedom to the scheduler. So we allow IP_END_POS only if IP_NORMAL_POS
2933 is not available (so we do not have a better alternative), or if the latch
2934 edge is already nonempty. */
2935
2936 static bool
2937 allow_ip_end_pos_p (struct loop *loop)
2938 {
2939 if (!ip_normal_pos (loop))
2940 return true;
2941
2942 if (!empty_block_p (ip_end_pos (loop)))
2943 return true;
2944
2945 return false;
2946 }
2947
2948 /* If possible, adds autoincrement candidates BASE + STEP * i based on use USE.
2949 Important field is set to IMPORTANT. */
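/* For instance, for a use of *p in SImode (4-byte accesses on typical
   targets) with a step of 4, the code below may add a candidate with base
   p - 4 incremented before the use (matching pre-increment addressing) and
   one with base p incremented after the use (matching post-increment
   addressing), provided the target supports those modes.  */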
2950
2951 static void
2952 add_autoinc_candidates (struct ivopts_data *data, tree base, tree step,
2953 bool important, struct iv_use *use)
2954 {
2955 basic_block use_bb = gimple_bb (use->stmt);
2956 machine_mode mem_mode;
2957 unsigned HOST_WIDE_INT cstepi;
2958
2959 /* If we insert the increment in any position other than the standard
2960 ones, we must ensure that it is incremented once per iteration.
2961 It must not be in an inner nested loop, or one side of an if
2962 statement. */
2963 if (use_bb->loop_father != data->current_loop
2964 || !dominated_by_p (CDI_DOMINATORS, data->current_loop->latch, use_bb)
2965 || stmt_could_throw_p (use->stmt)
2966 || !cst_and_fits_in_hwi (step))
2967 return;
2968
2969 cstepi = int_cst_value (step);
2970
2971 mem_mode = TYPE_MODE (TREE_TYPE (*use->op_p));
2972 if (((USE_LOAD_PRE_INCREMENT (mem_mode)
2973 || USE_STORE_PRE_INCREMENT (mem_mode))
2974 && GET_MODE_SIZE (mem_mode) == cstepi)
2975 || ((USE_LOAD_PRE_DECREMENT (mem_mode)
2976 || USE_STORE_PRE_DECREMENT (mem_mode))
2977 && GET_MODE_SIZE (mem_mode) == -cstepi))
2978 {
2979 enum tree_code code = MINUS_EXPR;
2980 tree new_base;
2981 tree new_step = step;
2982
2983 if (POINTER_TYPE_P (TREE_TYPE (base)))
2984 {
2985 new_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
2986 code = POINTER_PLUS_EXPR;
2987 }
2988 else
2989 new_step = fold_convert (TREE_TYPE (base), new_step);
2990 new_base = fold_build2 (code, TREE_TYPE (base), base, new_step);
2991 add_candidate_1 (data, new_base, step, important, IP_BEFORE_USE, use,
2992 use->stmt);
2993 }
2994 if (((USE_LOAD_POST_INCREMENT (mem_mode)
2995 || USE_STORE_POST_INCREMENT (mem_mode))
2996 && GET_MODE_SIZE (mem_mode) == cstepi)
2997 || ((USE_LOAD_POST_DECREMENT (mem_mode)
2998 || USE_STORE_POST_DECREMENT (mem_mode))
2999 && GET_MODE_SIZE (mem_mode) == -cstepi))
3000 {
3001 add_candidate_1 (data, base, step, important, IP_AFTER_USE, use,
3002 use->stmt);
3003 }
3004 }
3005
3006 /* Adds a candidate BASE + STEP * i. Important field is set to IMPORTANT and
3007 position to POS. If USE is not NULL, the candidate is set as related to
3008 it. The candidate computation is scheduled before the exit condition and at
3009 the end of the loop. */
3010
3011 static void
3012 add_candidate (struct ivopts_data *data,
3013 tree base, tree step, bool important, struct iv_use *use,
3014 struct iv *orig_iv = NULL)
3015 {
3016 gcc_assert (use == NULL || use->sub_id == 0);
3017
3018 if (ip_normal_pos (data->current_loop))
3019 add_candidate_1 (data, base, step, important,
3020 IP_NORMAL, use, NULL, orig_iv);
3021 if (ip_end_pos (data->current_loop)
3022 && allow_ip_end_pos_p (data->current_loop))
3023 add_candidate_1 (data, base, step, important, IP_END, use, NULL, orig_iv);
3024 }
3025
3026 /* Adds standard iv candidates. */
3027
3028 static void
3029 add_standard_iv_candidates (struct ivopts_data *data)
3030 {
3031 add_candidate (data, integer_zero_node, integer_one_node, true, NULL);
3032
3033 /* The same for a double-integer type if it is still fast enough. */
3034 if (TYPE_PRECISION
3035 (long_integer_type_node) > TYPE_PRECISION (integer_type_node)
3036 && TYPE_PRECISION (long_integer_type_node) <= BITS_PER_WORD)
3037 add_candidate (data, build_int_cst (long_integer_type_node, 0),
3038 build_int_cst (long_integer_type_node, 1), true, NULL);
3039
3040 /* The same for a double-integer type if it is still fast enough. */
3041 if (TYPE_PRECISION
3042 (long_long_integer_type_node) > TYPE_PRECISION (long_integer_type_node)
3043 && TYPE_PRECISION (long_long_integer_type_node) <= BITS_PER_WORD)
3044 add_candidate (data, build_int_cst (long_long_integer_type_node, 0),
3045 build_int_cst (long_long_integer_type_node, 1), true, NULL);
3046 }
3047
3048
3049 /* Adds candidates based on the old induction variable IV. */
3050
3051 static void
3052 add_iv_candidate_for_biv (struct ivopts_data *data, struct iv *iv)
3053 {
3054 gimple *phi;
3055 tree def;
3056 struct iv_cand *cand;
3057
3058 /* Check if this biv is used in address type use. */
3059 if (iv->no_overflow && iv->have_address_use
3060 && INTEGRAL_TYPE_P (TREE_TYPE (iv->base))
3061 && TYPE_PRECISION (TREE_TYPE (iv->base)) < TYPE_PRECISION (sizetype))
3062 {
3063 tree base = fold_convert (sizetype, iv->base);
3064 tree step = fold_convert (sizetype, iv->step);
3065
3066 /* Add iv cand of same precision as index part in TARGET_MEM_REF. */
3067 add_candidate (data, base, step, true, NULL, iv);
3068 /* Add iv cand of the original type only if it has nonlinear use. */
3069 if (iv->have_use_for)
3070 add_candidate (data, iv->base, iv->step, true, NULL);
3071 }
3072 else
3073 add_candidate (data, iv->base, iv->step, true, NULL);
3074
3075 /* The same, but with initial value zero. */
3076 if (POINTER_TYPE_P (TREE_TYPE (iv->base)))
3077 add_candidate (data, size_int (0), iv->step, true, NULL);
3078 else
3079 add_candidate (data, build_int_cst (TREE_TYPE (iv->base), 0),
3080 iv->step, true, NULL);
3081
3082 phi = SSA_NAME_DEF_STMT (iv->ssa_name);
3083 if (gimple_code (phi) == GIMPLE_PHI)
3084 {
3085 /* Additionally record the possibility of leaving the original iv
3086 untouched. */
3087 def = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (data->current_loop));
3088 /* Don't add candidate if it's from another PHI node because
3089 it's an affine iv appearing in the form of PEELED_CHREC. */
3090 phi = SSA_NAME_DEF_STMT (def);
3091 if (gimple_code (phi) != GIMPLE_PHI)
3092 {
3093 cand = add_candidate_1 (data,
3094 iv->base, iv->step, true, IP_ORIGINAL, NULL,
3095 SSA_NAME_DEF_STMT (def));
3096 if (cand)
3097 {
3098 cand->var_before = iv->ssa_name;
3099 cand->var_after = def;
3100 }
3101 }
3102 else
3103 gcc_assert (gimple_bb (phi) == data->current_loop->header);
3104 }
3105 }
3106
3107 /* Adds candidates based on the old induction variables. */
3108
3109 static void
3110 add_iv_candidate_for_bivs (struct ivopts_data *data)
3111 {
3112 unsigned i;
3113 struct iv *iv;
3114 bitmap_iterator bi;
3115
3116 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
3117 {
3118 iv = ver_info (data, i)->iv;
3119 if (iv && iv->biv_p && !integer_zerop (iv->step))
3120 add_iv_candidate_for_biv (data, iv);
3121 }
3122 }
3123
3124 /* Record common candidate {BASE, STEP} derived from USE in hashtable. */
3125
3126 static void
3127 record_common_cand (struct ivopts_data *data, tree base,
3128 tree step, struct iv_use *use)
3129 {
3130 struct iv_common_cand ent;
3131 struct iv_common_cand **slot;
3132
3133 gcc_assert (use != NULL);
3134
3135 ent.base = base;
3136 ent.step = step;
3137 ent.hash = iterative_hash_expr (base, 0);
3138 ent.hash = iterative_hash_expr (step, ent.hash);
3139
3140 slot = data->iv_common_cand_tab->find_slot (&ent, INSERT);
3141 if (*slot == NULL)
3142 {
3143 *slot = new iv_common_cand ();
3144 (*slot)->base = base;
3145 (*slot)->step = step;
3146 (*slot)->uses.create (8);
3147 (*slot)->hash = ent.hash;
3148 data->iv_common_cands.safe_push ((*slot));
3149 }
3150 (*slot)->uses.safe_push (use);
3151 return;
3152 }
3153
3154 /* Comparison function used to sort common candidates. */
3155
3156 static int
3157 common_cand_cmp (const void *p1, const void *p2)
3158 {
3159 unsigned n1, n2;
3160 const struct iv_common_cand *const *const ccand1
3161 = (const struct iv_common_cand *const *)p1;
3162 const struct iv_common_cand *const *const ccand2
3163 = (const struct iv_common_cand *const *)p2;
3164
3165 n1 = (*ccand1)->uses.length ();
3166 n2 = (*ccand2)->uses.length ();
3167 return n2 - n1;
3168 }
3169
3170 /* Adds IV candidates based on the common candidates recorded. */
3171
3172 static void
3173 add_iv_candidate_derived_from_uses (struct ivopts_data *data)
3174 {
3175 unsigned i, j;
3176 struct iv_cand *cand_1, *cand_2;
3177
3178 data->iv_common_cands.qsort (common_cand_cmp);
3179 for (i = 0; i < data->iv_common_cands.length (); i++)
3180 {
3181 struct iv_common_cand *ptr = data->iv_common_cands[i];
3182
3183 /* Only add IV candidate if it's derived from multiple uses. */
3184 if (ptr->uses.length () <= 1)
3185 break;
3186
3187 cand_1 = NULL;
3188 cand_2 = NULL;
3189 if (ip_normal_pos (data->current_loop))
3190 cand_1 = add_candidate_1 (data, ptr->base, ptr->step,
3191 false, IP_NORMAL, NULL, NULL);
3192
3193 if (ip_end_pos (data->current_loop)
3194 && allow_ip_end_pos_p (data->current_loop))
3195 cand_2 = add_candidate_1 (data, ptr->base, ptr->step,
3196 false, IP_END, NULL, NULL);
3197
3198 /* Bind deriving uses and the new candidates. */
3199 for (j = 0; j < ptr->uses.length (); j++)
3200 {
3201 struct iv_use *use = ptr->uses[j];
3202 if (cand_1)
3203 bitmap_set_bit (use->related_cands, cand_1->id);
3204 if (cand_2)
3205 bitmap_set_bit (use->related_cands, cand_2->id);
3206 }
3207 }
3208
3209 /* Release data since it is useless from this point. */
3210 data->iv_common_cand_tab->empty ();
3211 data->iv_common_cands.truncate (0);
3212 }
3213
3214 /* Adds candidates based on the value of USE's iv. */
3215
3216 static void
3217 add_iv_candidate_for_use (struct ivopts_data *data, struct iv_use *use)
3218 {
3219 unsigned HOST_WIDE_INT offset;
3220 tree base;
3221 tree basetype;
3222 struct iv *iv = use->iv;
3223
3224 add_candidate (data, iv->base, iv->step, false, use);
3225
3226 /* Record common candidate for use in case it can be shared by others. */
3227 record_common_cand (data, iv->base, iv->step, use);
3228
3229 /* Record common candidate with initial value zero. */
3230 basetype = TREE_TYPE (iv->base);
3231 if (POINTER_TYPE_P (basetype))
3232 basetype = sizetype;
3233 record_common_cand (data, build_int_cst (basetype, 0), iv->step, use);
3234
3235 /* Record common candidate with constant offset stripped in base. */
3236 {
3237 base = strip_offset (iv->base, &offset);
3238 if (offset || base != iv->base)
3239 record_common_cand (data, base, iv->step, use);
3240 }
3241
3242 /* Record common candidate with base_object removed in base. */
3243 if (iv->base_object != NULL)
3244 {
3245 unsigned i;
3246 aff_tree aff_base;
3247 tree step, base_object = iv->base_object;
3248
3249 base = iv->base;
3250 step = iv->step;
3251 STRIP_NOPS (base);
3252 STRIP_NOPS (step);
3253 STRIP_NOPS (base_object);
3254 tree_to_aff_combination (base, TREE_TYPE (base), &aff_base);
3255 for (i = 0; i < aff_base.n; i++)
3256 {
3257 if (aff_base.elts[i].coef != 1)
3258 continue;
3259
3260 if (operand_equal_p (aff_base.elts[i].val, base_object, 0))
3261 break;
3262 }
3263 if (i < aff_base.n)
3264 {
3265 aff_combination_remove_elt (&aff_base, i);
3266 base = aff_combination_to_tree (&aff_base);
3267 basetype = TREE_TYPE (base);
3268 if (POINTER_TYPE_P (basetype))
3269 basetype = sizetype;
3270
3271 step = fold_convert (basetype, step);
3272 record_common_cand (data, base, step, use);
3273 /* Also record common candidate with offset stripped. */
3274 base = strip_offset (base, &offset);
3275 if (offset)
3276 record_common_cand (data, base, step, use);
3277 }
3278 }
3279
3280 /* Finally, add auto-increment candidates. Make such variables
3281 important since other iv uses with the same base object may be based
3282 on them. */
3283 if (use != NULL && use->type == USE_ADDRESS)
3284 add_autoinc_candidates (data, iv->base, iv->step, true, use);
3285 }
3286
3287 /* Adds candidates based on the uses. */
3288
3289 static void
3290 add_iv_candidate_for_uses (struct ivopts_data *data)
3291 {
3292 unsigned i;
3293
3294 for (i = 0; i < n_iv_uses (data); i++)
3295 {
3296 struct iv_use *use = iv_use (data, i);
3297
3298 if (!use)
3299 continue;
3300
3301 switch (use->type)
3302 {
3303 case USE_NONLINEAR_EXPR:
3304 case USE_COMPARE:
3305 case USE_ADDRESS:
3306 /* Just add the ivs based on the value of the iv used here. */
3307 add_iv_candidate_for_use (data, use);
3308 break;
3309
3310 default:
3311 gcc_unreachable ();
3312 }
3313 }
3314 add_iv_candidate_derived_from_uses (data);
3315 }
3316
3317 /* Record important candidates and add them to related_cands bitmaps. */
3318
3319 static void
3320 record_important_candidates (struct ivopts_data *data)
3321 {
3322 unsigned i;
3323 struct iv_use *use;
3324
3325 for (i = 0; i < n_iv_cands (data); i++)
3326 {
3327 struct iv_cand *cand = iv_cand (data, i);
3328
3329 if (cand->important)
3330 bitmap_set_bit (data->important_candidates, i);
3331 }
3332
3333 data->consider_all_candidates = (n_iv_cands (data)
3334 <= CONSIDER_ALL_CANDIDATES_BOUND);
3335
3336 /* Add important candidates to uses' related_cands bitmaps. */
3337 for (i = 0; i < n_iv_uses (data); i++)
3338 {
3339 use = iv_use (data, i);
3340 bitmap_ior_into (use->related_cands, data->important_candidates);
3341 }
3342 }
3343
3344 /* Allocates the data structure mapping the (use, candidate) pairs to costs.
3345 If consider_all_candidates is true, we use a two-dimensional array, otherwise
3346 we allocate a simple list to every use. */
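/* For example, a use related to five candidates gets a cost map of size
   eight (the next power of two), so the open-addressing lookup can use a
   cheap mask (id & (size - 1)) instead of a division.  */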
3347
3348 static void
3349 alloc_use_cost_map (struct ivopts_data *data)
3350 {
3351 unsigned i, size, s;
3352
3353 for (i = 0; i < n_iv_uses (data); i++)
3354 {
3355 struct iv_use *use = iv_use (data, i);
3356
3357 if (data->consider_all_candidates)
3358 size = n_iv_cands (data);
3359 else
3360 {
3361 s = bitmap_count_bits (use->related_cands);
3362
3363 /* Round up to a power of two, so that taking the modulo by it is fast. */
3364 size = s ? (1 << ceil_log2 (s)) : 1;
3365 }
3366
3367 use->n_map_members = size;
3368 use->cost_map = XCNEWVEC (struct cost_pair, size);
3369 }
3370 }
3371
3372 /* Returns description of computation cost of expression whose runtime
3373 cost is RUNTIME and complexity corresponds to COMPLEXITY. */
3374
3375 static comp_cost
3376 new_cost (unsigned runtime, unsigned complexity)
3377 {
3378 comp_cost cost;
3379
3380 cost.cost = runtime;
3381 cost.complexity = complexity;
3382
3383 return cost;
3384 }
3385
3386 /* Returns true if COST is infinite. */
3387
3388 static bool
3389 infinite_cost_p (comp_cost cost)
3390 {
3391 return cost.cost == INFTY;
3392 }
3393
3394 /* Adds costs COST1 and COST2. */
3395
3396 static comp_cost
3397 add_costs (comp_cost cost1, comp_cost cost2)
3398 {
3399 if (infinite_cost_p (cost1) || infinite_cost_p (cost2))
3400 return infinite_cost;
3401
3402 cost1.cost += cost2.cost;
3403 cost1.complexity += cost2.complexity;
3404
3405 return cost1;
3406 }
3407 /* Subtracts cost COST2 from COST1. */
3408
3409 static comp_cost
3410 sub_costs (comp_cost cost1, comp_cost cost2)
3411 {
3412 cost1.cost -= cost2.cost;
3413 cost1.complexity -= cost2.complexity;
3414
3415 return cost1;
3416 }
3417
3418 /* Returns a negative number if COST1 < COST2, a positive number if
3419 COST1 > COST2, and 0 if COST1 = COST2. */
3420
3421 static int
3422 compare_costs (comp_cost cost1, comp_cost cost2)
3423 {
3424 if (cost1.cost == cost2.cost)
3425 return cost1.complexity - cost2.complexity;
3426
3427 return cost1.cost - cost2.cost;
3428 }
3429
3430 /* Sets cost of (USE, CANDIDATE) pair to COST and record that it depends
3431 on invariants DEPENDS_ON and that the value used in expressing it
3432 is VALUE, and in case of iv elimination the comparison operator is COMP. */
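/* As an example of the hashing below: with n_map_members equal to 8, a
   candidate with id 13 starts probing at slot 13 & 7 = 5 and scans forward,
   wrapping around to slot 0 if needed, until a free slot is found.  */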
3433
3434 static void
3435 set_use_iv_cost (struct ivopts_data *data,
3436 struct iv_use *use, struct iv_cand *cand,
3437 comp_cost cost, bitmap depends_on, tree value,
3438 enum tree_code comp, int inv_expr_id)
3439 {
3440 unsigned i, s;
3441
3442 if (infinite_cost_p (cost))
3443 {
3444 BITMAP_FREE (depends_on);
3445 return;
3446 }
3447
3448 if (data->consider_all_candidates)
3449 {
3450 use->cost_map[cand->id].cand = cand;
3451 use->cost_map[cand->id].cost = cost;
3452 use->cost_map[cand->id].depends_on = depends_on;
3453 use->cost_map[cand->id].value = value;
3454 use->cost_map[cand->id].comp = comp;
3455 use->cost_map[cand->id].inv_expr_id = inv_expr_id;
3456 return;
3457 }
3458
3459 /* n_map_members is a power of two, so this computes modulo. */
3460 s = cand->id & (use->n_map_members - 1);
3461 for (i = s; i < use->n_map_members; i++)
3462 if (!use->cost_map[i].cand)
3463 goto found;
3464 for (i = 0; i < s; i++)
3465 if (!use->cost_map[i].cand)
3466 goto found;
3467
3468 gcc_unreachable ();
3469
3470 found:
3471 use->cost_map[i].cand = cand;
3472 use->cost_map[i].cost = cost;
3473 use->cost_map[i].depends_on = depends_on;
3474 use->cost_map[i].value = value;
3475 use->cost_map[i].comp = comp;
3476 use->cost_map[i].inv_expr_id = inv_expr_id;
3477 }
3478
3479 /* Gets cost of (USE, CANDIDATE) pair. */
3480
3481 static struct cost_pair *
3482 get_use_iv_cost (struct ivopts_data *data, struct iv_use *use,
3483 struct iv_cand *cand)
3484 {
3485 unsigned i, s;
3486 struct cost_pair *ret;
3487
3488 if (!cand)
3489 return NULL;
3490
3491 if (data->consider_all_candidates)
3492 {
3493 ret = use->cost_map + cand->id;
3494 if (!ret->cand)
3495 return NULL;
3496
3497 return ret;
3498 }
3499
3500 /* n_map_members is a power of two, so this computes modulo. */
3501 s = cand->id & (use->n_map_members - 1);
3502 for (i = s; i < use->n_map_members; i++)
3503 if (use->cost_map[i].cand == cand)
3504 return use->cost_map + i;
3505 else if (use->cost_map[i].cand == NULL)
3506 return NULL;
3507 for (i = 0; i < s; i++)
3508 if (use->cost_map[i].cand == cand)
3509 return use->cost_map + i;
3510 else if (use->cost_map[i].cand == NULL)
3511 return NULL;
3512
3513 return NULL;
3514 }
3515
3516 /* Produce DECL_RTL for object obj so it looks like it is stored in memory. */
3517 static rtx
3518 produce_memory_decl_rtl (tree obj, int *regno)
3519 {
3520 addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (obj));
3521 machine_mode address_mode = targetm.addr_space.address_mode (as);
3522 rtx x;
3523
3524 gcc_assert (obj);
3525 if (TREE_STATIC (obj) || DECL_EXTERNAL (obj))
3526 {
3527 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (obj));
3528 x = gen_rtx_SYMBOL_REF (address_mode, name);
3529 SET_SYMBOL_REF_DECL (x, obj);
3530 x = gen_rtx_MEM (DECL_MODE (obj), x);
3531 set_mem_addr_space (x, as);
3532 targetm.encode_section_info (obj, x, true);
3533 }
3534 else
3535 {
3536 x = gen_raw_REG (address_mode, (*regno)++);
3537 x = gen_rtx_MEM (DECL_MODE (obj), x);
3538 set_mem_addr_space (x, as);
3539 }
3540
3541 return x;
3542 }
3543
3544 /* Prepares decl_rtl for variables referred to in *EXPR_P. Callback for
3545 walk_tree. DATA contains the actual fake register number. */
3546
3547 static tree
3548 prepare_decl_rtl (tree *expr_p, int *ws, void *data)
3549 {
3550 tree obj = NULL_TREE;
3551 rtx x = NULL_RTX;
3552 int *regno = (int *) data;
3553
3554 switch (TREE_CODE (*expr_p))
3555 {
3556 case ADDR_EXPR:
3557 for (expr_p = &TREE_OPERAND (*expr_p, 0);
3558 handled_component_p (*expr_p);
3559 expr_p = &TREE_OPERAND (*expr_p, 0))
3560 continue;
3561 obj = *expr_p;
3562 if (DECL_P (obj) && HAS_RTL_P (obj) && !DECL_RTL_SET_P (obj))
3563 x = produce_memory_decl_rtl (obj, regno);
3564 break;
3565
3566 case SSA_NAME:
3567 *ws = 0;
3568 obj = SSA_NAME_VAR (*expr_p);
3569 /* Defer handling of anonymous SSA_NAMEs to the expander. */
3570 if (!obj)
3571 return NULL_TREE;
3572 if (!DECL_RTL_SET_P (obj))
3573 x = gen_raw_REG (DECL_MODE (obj), (*regno)++);
3574 break;
3575
3576 case VAR_DECL:
3577 case PARM_DECL:
3578 case RESULT_DECL:
3579 *ws = 0;
3580 obj = *expr_p;
3581
3582 if (DECL_RTL_SET_P (obj))
3583 break;
3584
3585 if (DECL_MODE (obj) == BLKmode)
3586 x = produce_memory_decl_rtl (obj, regno);
3587 else
3588 x = gen_raw_REG (DECL_MODE (obj), (*regno)++);
3589
3590 break;
3591
3592 default:
3593 break;
3594 }
3595
3596 if (x)
3597 {
3598 decl_rtl_to_reset.safe_push (obj);
3599 SET_DECL_RTL (obj, x);
3600 }
3601
3602 return NULL_TREE;
3603 }
3604
3605 /* Determines cost of the computation of EXPR. */
3606
3607 static unsigned
3608 computation_cost (tree expr, bool speed)
3609 {
3610 rtx_insn *seq;
3611 rtx rslt;
3612 tree type = TREE_TYPE (expr);
3613 unsigned cost;
3614 /* Avoid using hard regs in ways which may be unsupported. */
3615 int regno = LAST_VIRTUAL_REGISTER + 1;
3616 struct cgraph_node *node = cgraph_node::get (current_function_decl);
3617 enum node_frequency real_frequency = node->frequency;
3618
3619 node->frequency = NODE_FREQUENCY_NORMAL;
3620 crtl->maybe_hot_insn_p = speed;
3621 walk_tree (&expr, prepare_decl_rtl, &regno, NULL);
3622 start_sequence ();
3623 rslt = expand_expr (expr, NULL_RTX, TYPE_MODE (type), EXPAND_NORMAL);
3624 seq = get_insns ();
3625 end_sequence ();
3626 default_rtl_profile ();
3627 node->frequency = real_frequency;
3628
3629 cost = seq_cost (seq, speed);
3630 if (MEM_P (rslt))
3631 cost += address_cost (XEXP (rslt, 0), TYPE_MODE (type),
3632 TYPE_ADDR_SPACE (type), speed);
3633 else if (!REG_P (rslt))
3634 cost += set_src_cost (rslt, TYPE_MODE (type), speed);
3635
3636 return cost;
3637 }
3638
3639 /* Returns variable containing the value of candidate CAND at statement AT. */
3640
3641 static tree
3642 var_at_stmt (struct loop *loop, struct iv_cand *cand, gimple *stmt)
3643 {
3644 if (stmt_after_increment (loop, cand, stmt))
3645 return cand->var_after;
3646 else
3647 return cand->var_before;
3648 }
3649
3650 /* If A is (TYPE) BA and B is (TYPE) BB, and the types of BA and BB have the
3651 same precision that is at least as wide as the precision of TYPE, stores
3652 BA to A and BB to B, and returns the type of BA. Otherwise, returns the
3653 type of A and B. */
3654
3655 static tree
3656 determine_common_wider_type (tree *a, tree *b)
3657 {
3658 tree wider_type = NULL;
3659 tree suba, subb;
3660 tree atype = TREE_TYPE (*a);
3661
3662 if (CONVERT_EXPR_P (*a))
3663 {
3664 suba = TREE_OPERAND (*a, 0);
3665 wider_type = TREE_TYPE (suba);
3666 if (TYPE_PRECISION (wider_type) < TYPE_PRECISION (atype))
3667 return atype;
3668 }
3669 else
3670 return atype;
3671
3672 if (CONVERT_EXPR_P (*b))
3673 {
3674 subb = TREE_OPERAND (*b, 0);
3675 if (TYPE_PRECISION (wider_type) != TYPE_PRECISION (TREE_TYPE (subb)))
3676 return atype;
3677 }
3678 else
3679 return atype;
3680
3681 *a = suba;
3682 *b = subb;
3683 return wider_type;
3684 }
3685
3686 /* Determines the expression by which USE is expressed from induction variable
3687 CAND at statement AT in LOOP. The expression is stored in a decomposed
3688 form into AFF. Returns false if USE cannot be expressed using CAND. */
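/* For instance, if the use IV is {base 4, step 8} and the candidate IV is
   {base 0, step 4}, then the ratio is 2 and the use value is reconstructed
   as 4 + 2 * var, following use = ubase - ratio * cbase + ratio * var.  */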
3689
3690 static bool
3691 get_computation_aff (struct loop *loop,
3692 struct iv_use *use, struct iv_cand *cand, gimple *at,
3693 struct aff_tree *aff)
3694 {
3695 tree ubase = use->iv->base;
3696 tree ustep = use->iv->step;
3697 tree cbase = cand->iv->base;
3698 tree cstep = cand->iv->step, cstep_common;
3699 tree utype = TREE_TYPE (ubase), ctype = TREE_TYPE (cbase);
3700 tree common_type, var;
3701 tree uutype;
3702 aff_tree cbase_aff, var_aff;
3703 widest_int rat;
3704
3705 if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
3706 {
3707 /* We do not have a precision to express the values of use. */
3708 return false;
3709 }
3710
3711 var = var_at_stmt (loop, cand, at);
3712 uutype = unsigned_type_for (utype);
3713
3714 /* If the conversion is not noop, perform it. */
3715 if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
3716 {
3717 if (cand->orig_iv != NULL && CONVERT_EXPR_P (cbase)
3718 && (CONVERT_EXPR_P (cstep) || TREE_CODE (cstep) == INTEGER_CST))
3719 {
3720 tree inner_base, inner_step, inner_type;
3721 inner_base = TREE_OPERAND (cbase, 0);
3722 if (CONVERT_EXPR_P (cstep))
3723 inner_step = TREE_OPERAND (cstep, 0);
3724 else
3725 inner_step = cstep;
3726
3727 inner_type = TREE_TYPE (inner_base);
3728 /* If candidate is added from a biv whose type is smaller than
3729 ctype, we know both candidate and the biv won't overflow.
3730 In this case, it's safe to skip the conversion in the candidate.
3731 As an example, (unsigned short)((unsigned long)A) equals
3732 (unsigned short)A, if A has a type no larger than short. */
3733 if (TYPE_PRECISION (inner_type) <= TYPE_PRECISION (uutype))
3734 {
3735 cbase = inner_base;
3736 cstep = inner_step;
3737 }
3738 }
3739 cstep = fold_convert (uutype, cstep);
3740 cbase = fold_convert (uutype, cbase);
3741 var = fold_convert (uutype, var);
3742 }
3743
3744 if (!constant_multiple_of (ustep, cstep, &rat))
3745 return false;
3746
3747 /* In case both UBASE and CBASE are shortened to UUTYPE from some common
3748 type, we achieve better folding by computing their difference in this
3749 wider type, and casting the result to UUTYPE. We do not need to worry about
3750 overflows, as all the arithmetic will in the end be performed in UUTYPE
3751 anyway. */
3752 common_type = determine_common_wider_type (&ubase, &cbase);
3753
3754 /* use = ubase - ratio * cbase + ratio * var. */
3755 tree_to_aff_combination (ubase, common_type, aff);
3756 tree_to_aff_combination (cbase, common_type, &cbase_aff);
3757 tree_to_aff_combination (var, uutype, &var_aff);
3758
3759 /* We need to shift the value if we are after the increment. */
3760 if (stmt_after_increment (loop, cand, at))
3761 {
3762 aff_tree cstep_aff;
3763
3764 if (common_type != uutype)
3765 cstep_common = fold_convert (common_type, cstep);
3766 else
3767 cstep_common = cstep;
3768
3769 tree_to_aff_combination (cstep_common, common_type, &cstep_aff);
3770 aff_combination_add (&cbase_aff, &cstep_aff);
3771 }
3772
3773 aff_combination_scale (&cbase_aff, -rat);
3774 aff_combination_add (aff, &cbase_aff);
3775 if (common_type != uutype)
3776 aff_combination_convert (aff, uutype);
3777
3778 aff_combination_scale (&var_aff, rat);
3779 aff_combination_add (aff, &var_aff);
3780
3781 return true;
3782 }
3783
3784 /* Return the type of USE. */
3785
3786 static tree
3787 get_use_type (struct iv_use *use)
3788 {
3789 tree base_type = TREE_TYPE (use->iv->base);
3790 tree type;
3791
3792 if (use->type == USE_ADDRESS)
3793 {
3794 /* The base_type may be a void pointer. Create a pointer type based on
3795 the mem_ref instead. */
3796 type = build_pointer_type (TREE_TYPE (*use->op_p));
3797 gcc_assert (TYPE_ADDR_SPACE (TREE_TYPE (type))
3798 == TYPE_ADDR_SPACE (TREE_TYPE (base_type)));
3799 }
3800 else
3801 type = base_type;
3802
3803 return type;
3804 }
3805
3806 /* Determines the expression by which USE is expressed from induction variable
3807 CAND at statement AT in LOOP. The computation is unshared. */
3808
3809 static tree
3810 get_computation_at (struct loop *loop,
3811 struct iv_use *use, struct iv_cand *cand, gimple *at)
3812 {
3813 aff_tree aff;
3814 tree type = get_use_type (use);
3815
3816 if (!get_computation_aff (loop, use, cand, at, &aff))
3817 return NULL_TREE;
3818 unshare_aff_combination (&aff);
3819 return fold_convert (type, aff_combination_to_tree (&aff));
3820 }
3821
3822 /* Determines the expression by which USE is expressed from induction variable
3823 CAND in LOOP. The computation is unshared. */
3824
3825 static tree
3826 get_computation (struct loop *loop, struct iv_use *use, struct iv_cand *cand)
3827 {
3828 return get_computation_at (loop, use, cand, use->stmt);
3829 }
3830
3831 /* Adjust the cost COST for being in loop setup rather than loop body.
3832 If we're optimizing for space, the loop setup overhead is constant;
3833 if we're optimizing for speed, amortize it over the per-iteration cost. */
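/* For example, a setup cost of 40 in a loop that is expected to iterate
   about 10 times contributes 4 per iteration when optimizing for speed,
   but the full 40 when optimizing for size.  */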
3834 static unsigned
3835 adjust_setup_cost (struct ivopts_data *data, unsigned cost)
3836 {
3837 if (cost == INFTY)
3838 return cost;
3839 else if (optimize_loop_for_speed_p (data->current_loop))
3840 return cost / avg_loop_niter (data->current_loop);
3841 else
3842 return cost;
3843 }
3844
3845 /* Returns true if multiplying by RATIO is allowed in an address. Test the
3846 validity for a memory reference accessing memory of mode MODE in
3847 address space AS. */
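/* For instance, on a target whose scaled addressing accepts index factors of
   1, 2, 4 and 8 (as x86 does), only those ratios are marked valid in the
   cached bitmap probed below.  */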
3848
3849
3850 bool
3851 multiplier_allowed_in_address_p (HOST_WIDE_INT ratio, machine_mode mode,
3852 addr_space_t as)
3853 {
3854 #define MAX_RATIO 128
3855 unsigned int data_index = (int) as * MAX_MACHINE_MODE + (int) mode;
3856 static vec<sbitmap> valid_mult_list;
3857 sbitmap valid_mult;
3858
3859 if (data_index >= valid_mult_list.length ())
3860 valid_mult_list.safe_grow_cleared (data_index + 1);
3861
3862 valid_mult = valid_mult_list[data_index];
3863 if (!valid_mult)
3864 {
3865 machine_mode address_mode = targetm.addr_space.address_mode (as);
3866 rtx reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);
3867 rtx reg2 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 2);
3868 rtx addr, scaled;
3869 HOST_WIDE_INT i;
3870
3871 valid_mult = sbitmap_alloc (2 * MAX_RATIO + 1);
3872 bitmap_clear (valid_mult);
3873 scaled = gen_rtx_fmt_ee (MULT, address_mode, reg1, NULL_RTX);
3874 addr = gen_rtx_fmt_ee (PLUS, address_mode, scaled, reg2);
3875 for (i = -MAX_RATIO; i <= MAX_RATIO; i++)
3876 {
3877 XEXP (scaled, 1) = gen_int_mode (i, address_mode);
3878 if (memory_address_addr_space_p (mode, addr, as)
3879 || memory_address_addr_space_p (mode, scaled, as))
3880 bitmap_set_bit (valid_mult, i + MAX_RATIO);
3881 }
3882
3883 if (dump_file && (dump_flags & TDF_DETAILS))
3884 {
3885 fprintf (dump_file, " allowed multipliers:");
3886 for (i = -MAX_RATIO; i <= MAX_RATIO; i++)
3887 if (bitmap_bit_p (valid_mult, i + MAX_RATIO))
3888 fprintf (dump_file, " %d", (int) i);
3889 fprintf (dump_file, "\n");
3890 fprintf (dump_file, "\n");
3891 }
3892
3893 valid_mult_list[data_index] = valid_mult;
3894 }
3895
3896 if (ratio > MAX_RATIO || ratio < -MAX_RATIO)
3897 return false;
3898
3899 return bitmap_bit_p (valid_mult, ratio + MAX_RATIO);
3900 }
3901
3902 /* Returns cost of address in shape symbol + var + OFFSET + RATIO * index.
3903 If SYMBOL_PRESENT is false, symbol is omitted. If VAR_PRESENT is false,
3904 variable is omitted. Compute the cost for a memory reference that accesses
3905 a memory location of mode MEM_MODE in address space AS.
3906
3907 MAY_AUTOINC is set to true if the autoincrement (increasing index by
3908 size of MEM_MODE / RATIO) is available. To make this determination, we
3909 look at the size of the increment to be made, which is given in CSTEP.
3910 CSTEP may be zero if the step is unknown.
3911 STMT_AFTER_INC is true iff the statement we're looking at is after the
3912 increment of the original biv.
3913
3914 TODO -- there must be some better way. This all is quite crude. */
3915
3916 enum ainc_type
3917 {
3918 AINC_PRE_INC, /* Pre increment. */
3919 AINC_PRE_DEC, /* Pre decrement. */
3920 AINC_POST_INC, /* Post increment. */
3921 AINC_POST_DEC, /* Post decrement. */
3922 AINC_NONE /* Also the number of auto increment types. */
3923 };
3924
3925 struct address_cost_data
3926 {
3927 HOST_WIDE_INT min_offset, max_offset;
3928 unsigned costs[2][2][2][2];
3929 unsigned ainc_costs[AINC_NONE];
3930 };
3931
3932
3933 static comp_cost
3934 get_address_cost (bool symbol_present, bool var_present,
3935 unsigned HOST_WIDE_INT offset, HOST_WIDE_INT ratio,
3936 HOST_WIDE_INT cstep, machine_mode mem_mode,
3937 addr_space_t as, bool speed,
3938 bool stmt_after_inc, bool *may_autoinc)
3939 {
3940 machine_mode address_mode = targetm.addr_space.address_mode (as);
3941 static vec<address_cost_data *> address_cost_data_list;
3942 unsigned int data_index = (int) as * MAX_MACHINE_MODE + (int) mem_mode;
3943 address_cost_data *data;
3944 static bool has_preinc[MAX_MACHINE_MODE], has_postinc[MAX_MACHINE_MODE];
3945 static bool has_predec[MAX_MACHINE_MODE], has_postdec[MAX_MACHINE_MODE];
3946 unsigned cost, acost, complexity;
3947 enum ainc_type autoinc_type;
3948 bool offset_p, ratio_p, autoinc;
3949 HOST_WIDE_INT s_offset, autoinc_offset, msize;
3950 unsigned HOST_WIDE_INT mask;
3951 unsigned bits;
3952
3953 if (data_index >= address_cost_data_list.length ())
3954 address_cost_data_list.safe_grow_cleared (data_index + 1);
3955
3956 data = address_cost_data_list[data_index];
3957 if (!data)
3958 {
3959 HOST_WIDE_INT i;
3960 HOST_WIDE_INT rat, off = 0;
3961 int old_cse_not_expected, width;
3962 unsigned sym_p, var_p, off_p, rat_p, add_c;
3963 rtx_insn *seq;
3964 rtx addr, base;
3965 rtx reg0, reg1;
3966
3967 data = (address_cost_data *) xcalloc (1, sizeof (*data));
3968
3969 reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);
3970
3971 width = GET_MODE_BITSIZE (address_mode) - 1;
3972 if (width > (HOST_BITS_PER_WIDE_INT - 1))
3973 width = HOST_BITS_PER_WIDE_INT - 1;
3974 addr = gen_rtx_fmt_ee (PLUS, address_mode, reg1, NULL_RTX);
3975
3976 for (i = width; i >= 0; i--)
3977 {
3978 off = -((unsigned HOST_WIDE_INT) 1 << i);
3979 XEXP (addr, 1) = gen_int_mode (off, address_mode);
3980 if (memory_address_addr_space_p (mem_mode, addr, as))
3981 break;
3982 }
3983 data->min_offset = (i == -1? 0 : off);
3984
3985 for (i = width; i >= 0; i--)
3986 {
3987 off = ((unsigned HOST_WIDE_INT) 1 << i) - 1;
3988 XEXP (addr, 1) = gen_int_mode (off, address_mode);
3989 if (memory_address_addr_space_p (mem_mode, addr, as))
3990 break;
3991 /* For some strict-alignment targets, the offset must be naturally
3992 aligned. Try an aligned offset if mem_mode is not QImode. */
3993 off = mem_mode != QImode
3994 ? ((unsigned HOST_WIDE_INT) 1 << i)
3995 - GET_MODE_SIZE (mem_mode)
3996 : 0;
3997 if (off > 0)
3998 {
3999 XEXP (addr, 1) = gen_int_mode (off, address_mode);
4000 if (memory_address_addr_space_p (mem_mode, addr, as))
4001 break;
4002 }
4003 }
4004 if (i == -1)
4005 off = 0;
4006 data->max_offset = off;
4007
4008 if (dump_file && (dump_flags & TDF_DETAILS))
4009 {
4010 fprintf (dump_file, "get_address_cost:\n");
4011 fprintf (dump_file, " min offset %s " HOST_WIDE_INT_PRINT_DEC "\n",
4012 GET_MODE_NAME (mem_mode),
4013 data->min_offset);
4014 fprintf (dump_file, " max offset %s " HOST_WIDE_INT_PRINT_DEC "\n",
4015 GET_MODE_NAME (mem_mode),
4016 data->max_offset);
4017 }
4018
4019 rat = 1;
4020 for (i = 2; i <= MAX_RATIO; i++)
4021 if (multiplier_allowed_in_address_p (i, mem_mode, as))
4022 {
4023 rat = i;
4024 break;
4025 }
4026
4027 /* Compute the cost of various addressing modes. */
4028 acost = 0;
4029 reg0 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);
4030 reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 2);
4031
4032 if (USE_LOAD_PRE_DECREMENT (mem_mode)
4033 || USE_STORE_PRE_DECREMENT (mem_mode))
4034 {
4035 addr = gen_rtx_PRE_DEC (address_mode, reg0);
4036 has_predec[mem_mode]
4037 = memory_address_addr_space_p (mem_mode, addr, as);
4038
4039 if (has_predec[mem_mode])
4040 data->ainc_costs[AINC_PRE_DEC]
4041 = address_cost (addr, mem_mode, as, speed);
4042 }
4043 if (USE_LOAD_POST_DECREMENT (mem_mode)
4044 || USE_STORE_POST_DECREMENT (mem_mode))
4045 {
4046 addr = gen_rtx_POST_DEC (address_mode, reg0);
4047 has_postdec[mem_mode]
4048 = memory_address_addr_space_p (mem_mode, addr, as);
4049
4050 if (has_postdec[mem_mode])
4051 data->ainc_costs[AINC_POST_DEC]
4052 = address_cost (addr, mem_mode, as, speed);
4053 }
4054 if (USE_LOAD_PRE_INCREMENT (mem_mode)
4055 || USE_STORE_PRE_INCREMENT (mem_mode))
4056 {
4057 addr = gen_rtx_PRE_INC (address_mode, reg0);
4058 has_preinc[mem_mode]
4059 = memory_address_addr_space_p (mem_mode, addr, as);
4060
4061 if (has_preinc[mem_mode])
4062 data->ainc_costs[AINC_PRE_INC]
4063 = address_cost (addr, mem_mode, as, speed);
4064 }
4065 if (USE_LOAD_POST_INCREMENT (mem_mode)
4066 || USE_STORE_POST_INCREMENT (mem_mode))
4067 {
4068 addr = gen_rtx_POST_INC (address_mode, reg0);
4069 has_postinc[mem_mode]
4070 = memory_address_addr_space_p (mem_mode, addr, as);
4071
4072 if (has_postinc[mem_mode])
4073 data->ainc_costs[AINC_POST_INC]
4074 = address_cost (addr, mem_mode, as, speed);
4075 }
4076 for (i = 0; i < 16; i++)
4077 {
4078 sym_p = i & 1;
4079 var_p = (i >> 1) & 1;
4080 off_p = (i >> 2) & 1;
4081 rat_p = (i >> 3) & 1;
4082
4083 addr = reg0;
4084 if (rat_p)
4085 addr = gen_rtx_fmt_ee (MULT, address_mode, addr,
4086 gen_int_mode (rat, address_mode));
4087
4088 if (var_p)
4089 addr = gen_rtx_fmt_ee (PLUS, address_mode, addr, reg1);
4090
4091 if (sym_p)
4092 {
4093 base = gen_rtx_SYMBOL_REF (address_mode, ggc_strdup (""));
4094 /* ??? We can run into trouble with some backends by presenting
4095 them with symbols which haven't been properly passed through
4096 targetm.encode_section_info. By setting the local bit, we
4097 enhance the probability of things working. */
4098 SYMBOL_REF_FLAGS (base) = SYMBOL_FLAG_LOCAL;
4099
4100 if (off_p)
4101 base = gen_rtx_fmt_e (CONST, address_mode,
4102 gen_rtx_fmt_ee
4103 (PLUS, address_mode, base,
4104 gen_int_mode (off, address_mode)));
4105 }
4106 else if (off_p)
4107 base = gen_int_mode (off, address_mode);
4108 else
4109 base = NULL_RTX;
4110
4111 if (base)
4112 addr = gen_rtx_fmt_ee (PLUS, address_mode, addr, base);
4113
4114 start_sequence ();
4115 /* To avoid splitting addressing modes, pretend that no cse will
4116 follow. */
4117 old_cse_not_expected = cse_not_expected;
4118 cse_not_expected = true;
4119 addr = memory_address_addr_space (mem_mode, addr, as);
4120 cse_not_expected = old_cse_not_expected;
4121 seq = get_insns ();
4122 end_sequence ();
4123
4124 acost = seq_cost (seq, speed);
4125 acost += address_cost (addr, mem_mode, as, speed);
4126
4127 if (!acost)
4128 acost = 1;
4129 data->costs[sym_p][var_p][off_p][rat_p] = acost;
4130 }
4131
4132 /* On some targets, it is quite expensive to load a symbol into a register,
4133 which makes addresses that contain symbols look much more expensive.
4134 However, the symbol will have to be loaded in any case before the
4135 loop (and quite likely we have it in a register already), so it does not
4136 make much sense to penalize them too heavily. So make some final
4137 tweaks for the SYMBOL_PRESENT modes:
4138
4139 If VAR_PRESENT is false, and the mode obtained by changing symbol to
4140 var is cheaper, use this mode with small penalty.
4141 If VAR_PRESENT is true, try whether the mode with
4142 SYMBOL_PRESENT = false is cheaper even with cost of addition, and
4143 if this is the case, use it. */
4144 add_c = add_cost (speed, address_mode);
4145 for (i = 0; i < 8; i++)
4146 {
4147 var_p = i & 1;
4148 off_p = (i >> 1) & 1;
4149 rat_p = (i >> 2) & 1;
4150
4151 acost = data->costs[0][1][off_p][rat_p] + 1;
4152 if (var_p)
4153 acost += add_c;
4154
4155 if (acost < data->costs[1][var_p][off_p][rat_p])
4156 data->costs[1][var_p][off_p][rat_p] = acost;
4157 }
4158
4159 if (dump_file && (dump_flags & TDF_DETAILS))
4160 {
4161 fprintf (dump_file, "Address costs:\n");
4162
4163 for (i = 0; i < 16; i++)
4164 {
4165 sym_p = i & 1;
4166 var_p = (i >> 1) & 1;
4167 off_p = (i >> 2) & 1;
4168 rat_p = (i >> 3) & 1;
4169
4170 fprintf (dump_file, " ");
4171 if (sym_p)
4172 fprintf (dump_file, "sym + ");
4173 if (var_p)
4174 fprintf (dump_file, "var + ");
4175 if (off_p)
4176 fprintf (dump_file, "cst + ");
4177 if (rat_p)
4178 fprintf (dump_file, "rat * ");
4179
4180 acost = data->costs[sym_p][var_p][off_p][rat_p];
4181 fprintf (dump_file, "index costs %d\n", acost);
4182 }
4183 if (has_predec[mem_mode] || has_postdec[mem_mode]
4184 || has_preinc[mem_mode] || has_postinc[mem_mode])
4185 fprintf (dump_file, " May include autoinc/dec\n");
4186 fprintf (dump_file, "\n");
4187 }
4188
4189 address_cost_data_list[data_index] = data;
4190 }
4191
4192 bits = GET_MODE_BITSIZE (address_mode);
4193 mask = ~(~(unsigned HOST_WIDE_INT) 0 << (bits - 1) << 1);
4194 offset &= mask;
4195 if ((offset >> (bits - 1) & 1))
4196 offset |= ~mask;
4197 s_offset = offset;
4198
4199 autoinc = false;
4200 autoinc_type = AINC_NONE;
4201 msize = GET_MODE_SIZE (mem_mode);
4202 autoinc_offset = offset;
4203 if (stmt_after_inc)
4204 autoinc_offset += ratio * cstep;
4205 if (symbol_present || var_present || ratio != 1)
4206 autoinc = false;
4207 else
4208 {
4209 if (has_postinc[mem_mode] && autoinc_offset == 0
4210 && msize == cstep)
4211 autoinc_type = AINC_POST_INC;
4212 else if (has_postdec[mem_mode] && autoinc_offset == 0
4213 && msize == -cstep)
4214 autoinc_type = AINC_POST_DEC;
4215 else if (has_preinc[mem_mode] && autoinc_offset == msize
4216 && msize == cstep)
4217 autoinc_type = AINC_PRE_INC;
4218 else if (has_predec[mem_mode] && autoinc_offset == -msize
4219 && msize == -cstep)
4220 autoinc_type = AINC_PRE_DEC;
4221
4222 if (autoinc_type != AINC_NONE)
4223 autoinc = true;
4224 }
4225
4226 cost = 0;
4227 offset_p = (s_offset != 0
4228 && data->min_offset <= s_offset
4229 && s_offset <= data->max_offset);
4230 ratio_p = (ratio != 1
4231 && multiplier_allowed_in_address_p (ratio, mem_mode, as));
4232
4233 if (ratio != 1 && !ratio_p)
4234 cost += mult_by_coeff_cost (ratio, address_mode, speed);
4235
4236 if (s_offset && !offset_p && !symbol_present)
4237 cost += add_cost (speed, address_mode);
4238
4239 if (may_autoinc)
4240 *may_autoinc = autoinc;
4241 if (autoinc)
4242 acost = data->ainc_costs[autoinc_type];
4243 else
4244 acost = data->costs[symbol_present][var_present][offset_p][ratio_p];
4245 complexity = (symbol_present != 0) + (var_present != 0) + offset_p + ratio_p;
4246 return new_cost (cost + acost, complexity);
4247 }
4248
4249 /* Calculate the SPEED or size cost of shiftadd EXPR in MODE. MULT is the
4250 EXPR operand holding the shift. COST0 and COST1 are the costs for
4251 calculating the operands of EXPR. Returns true if successful, and returns
4252 the cost in COST. */
4253
4254 static bool
4255 get_shiftadd_cost (tree expr, machine_mode mode, comp_cost cost0,
4256 comp_cost cost1, tree mult, bool speed, comp_cost *cost)
4257 {
4258 comp_cost res;
4259 tree op1 = TREE_OPERAND (expr, 1);
4260 tree cst = TREE_OPERAND (mult, 1);
4261 tree multop = TREE_OPERAND (mult, 0);
4262 int m = exact_log2 (int_cst_value (cst));
4263 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
4264 int as_cost, sa_cost;
4265 bool mult_in_op1;
4266
4267 if (!(m >= 0 && m < maxm))
4268 return false;
4269
4270 STRIP_NOPS (op1);
4271 mult_in_op1 = operand_equal_p (op1, mult, 0);
4272
4273 as_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
4274
4275 /* If the target has a cheap shift-and-add or shift-and-sub instruction,
4276 use that in preference to a shift insn followed by an add insn. */
4277 sa_cost = (TREE_CODE (expr) != MINUS_EXPR
4278 ? shiftadd_cost (speed, mode, m)
4279 : (mult_in_op1
4280 ? shiftsub1_cost (speed, mode, m)
4281 : shiftsub0_cost (speed, mode, m)));
4282
4283 res = new_cost (MIN (as_cost, sa_cost), 0);
4284 res = add_costs (res, mult_in_op1 ? cost0 : cost1);
4285
4286 STRIP_NOPS (multop);
4287 if (!is_gimple_val (multop))
4288 res = add_costs (res, force_expr_to_var_cost (multop, speed));
4289
4290 *cost = res;
4291 return true;
4292 }
4293
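/* Illustration for the function above (editorial, hypothetical expression):
   for EXPR = a + b * 8 we have MULT = `b * 8' and m == 3, so the cost is
   min (add_cost + shift_cost for a shift by 3, shiftadd_cost), i.e. the
   cheaper of the two-insn sequence `t = b << 3; t + a' and a single
   shift-and-add instruction where the target provides one (e.g. an x86 LEA
   with scale 8 or an ARM `add r0, r1, r2, lsl #3').  For MINUS_EXPR the
   shiftsub variants are used instead.  */
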
4294 /* Estimates cost of forcing expression EXPR into a variable. */
4295
4296 static comp_cost
4297 force_expr_to_var_cost (tree expr, bool speed)
4298 {
4299 static bool costs_initialized = false;
4300 static unsigned integer_cost [2];
4301 static unsigned symbol_cost [2];
4302 static unsigned address_cost [2];
4303 tree op0, op1;
4304 comp_cost cost0, cost1, cost;
4305 machine_mode mode;
4306
4307 if (!costs_initialized)
4308 {
4309 tree type = build_pointer_type (integer_type_node);
4310 tree var, addr;
4311 rtx x;
4312 int i;
4313
4314 var = create_tmp_var_raw (integer_type_node, "test_var");
4315 TREE_STATIC (var) = 1;
4316 x = produce_memory_decl_rtl (var, NULL);
4317 SET_DECL_RTL (var, x);
4318
4319 addr = build1 (ADDR_EXPR, type, var);
4320
4321
4322 for (i = 0; i < 2; i++)
4323 {
4324 integer_cost[i] = computation_cost (build_int_cst (integer_type_node,
4325 2000), i);
4326
4327 symbol_cost[i] = computation_cost (addr, i) + 1;
4328
4329 address_cost[i]
4330 = computation_cost (fold_build_pointer_plus_hwi (addr, 2000), i) + 1;
4331 if (dump_file && (dump_flags & TDF_DETAILS))
4332 {
4333 fprintf (dump_file, "force_expr_to_var_cost %s costs:\n", i ? "speed" : "size");
4334 fprintf (dump_file, " integer %d\n", (int) integer_cost[i]);
4335 fprintf (dump_file, " symbol %d\n", (int) symbol_cost[i]);
4336 fprintf (dump_file, " address %d\n", (int) address_cost[i]);
4337 fprintf (dump_file, " other %d\n", (int) target_spill_cost[i]);
4338 fprintf (dump_file, "\n");
4339 }
4340 }
4341
4342 costs_initialized = true;
4343 }
4344
4345 STRIP_NOPS (expr);
4346
4347 if (SSA_VAR_P (expr))
4348 return no_cost;
4349
4350 if (is_gimple_min_invariant (expr))
4351 {
4352 if (TREE_CODE (expr) == INTEGER_CST)
4353 return new_cost (integer_cost [speed], 0);
4354
4355 if (TREE_CODE (expr) == ADDR_EXPR)
4356 {
4357 tree obj = TREE_OPERAND (expr, 0);
4358
4359 if (TREE_CODE (obj) == VAR_DECL
4360 || TREE_CODE (obj) == PARM_DECL
4361 || TREE_CODE (obj) == RESULT_DECL)
4362 return new_cost (symbol_cost [speed], 0);
4363 }
4364
4365 return new_cost (address_cost [speed], 0);
4366 }
4367
4368 switch (TREE_CODE (expr))
4369 {
4370 case POINTER_PLUS_EXPR:
4371 case PLUS_EXPR:
4372 case MINUS_EXPR:
4373 case MULT_EXPR:
4374 op0 = TREE_OPERAND (expr, 0);
4375 op1 = TREE_OPERAND (expr, 1);
4376 STRIP_NOPS (op0);
4377 STRIP_NOPS (op1);
4378 break;
4379
4380 CASE_CONVERT:
4381 case NEGATE_EXPR:
4382 op0 = TREE_OPERAND (expr, 0);
4383 STRIP_NOPS (op0);
4384 op1 = NULL_TREE;
4385 break;
4386
4387 default:
4388 /* Just an arbitrary value, FIXME. */
4389 return new_cost (target_spill_cost[speed], 0);
4390 }
4391
4392 if (op0 == NULL_TREE
4393 || TREE_CODE (op0) == SSA_NAME || CONSTANT_CLASS_P (op0))
4394 cost0 = no_cost;
4395 else
4396 cost0 = force_expr_to_var_cost (op0, speed);
4397
4398 if (op1 == NULL_TREE
4399 || TREE_CODE (op1) == SSA_NAME || CONSTANT_CLASS_P (op1))
4400 cost1 = no_cost;
4401 else
4402 cost1 = force_expr_to_var_cost (op1, speed);
4403
4404 mode = TYPE_MODE (TREE_TYPE (expr));
4405 switch (TREE_CODE (expr))
4406 {
4407 case POINTER_PLUS_EXPR:
4408 case PLUS_EXPR:
4409 case MINUS_EXPR:
4410 case NEGATE_EXPR:
4411 cost = new_cost (add_cost (speed, mode), 0);
4412 if (TREE_CODE (expr) != NEGATE_EXPR)
4413 {
4414 tree mult = NULL_TREE;
4415 comp_cost sa_cost;
4416 if (TREE_CODE (op1) == MULT_EXPR)
4417 mult = op1;
4418 else if (TREE_CODE (op0) == MULT_EXPR)
4419 mult = op0;
4420
4421 if (mult != NULL_TREE
4422 && cst_and_fits_in_hwi (TREE_OPERAND (mult, 1))
4423 && get_shiftadd_cost (expr, mode, cost0, cost1, mult,
4424 speed, &sa_cost))
4425 return sa_cost;
4426 }
4427 break;
4428
4429 CASE_CONVERT:
4430 {
4431 tree inner_mode, outer_mode;
4432 outer_mode = TREE_TYPE (expr);
4433 inner_mode = TREE_TYPE (op0);
4434 cost = new_cost (convert_cost (TYPE_MODE (outer_mode),
4435 TYPE_MODE (inner_mode), speed), 0);
4436 }
4437 break;
4438
4439 case MULT_EXPR:
4440 if (cst_and_fits_in_hwi (op0))
4441 cost = new_cost (mult_by_coeff_cost (int_cst_value (op0),
4442 mode, speed), 0);
4443 else if (cst_and_fits_in_hwi (op1))
4444 cost = new_cost (mult_by_coeff_cost (int_cst_value (op1),
4445 mode, speed), 0);
4446 else
4447 return new_cost (target_spill_cost [speed], 0);
4448 break;
4449
4450 default:
4451 gcc_unreachable ();
4452 }
4453
4454 cost = add_costs (cost, cost0);
4455 cost = add_costs (cost, cost1);
4456
4457 /* Bound the cost by target_spill_cost. The parts of complicated
4458 computations often are either loop invariant or at least can
4459 be shared between several iv uses, so letting this grow without
4460 limits would not give reasonable results. */
4461 if (cost.cost > (int) target_spill_cost [speed])
4462 cost.cost = target_spill_cost [speed];
4463
4464 return cost;
4465 }
4466
4467 /* Estimates cost of forcing EXPR into a variable. DEPENDS_ON is a set of the
4468 invariants the computation depends on. */
4469
4470 static comp_cost
4471 force_var_cost (struct ivopts_data *data,
4472 tree expr, bitmap *depends_on)
4473 {
4474 if (depends_on)
4475 {
4476 fd_ivopts_data = data;
4477 walk_tree (&expr, find_depends, depends_on, NULL);
4478 }
4479
4480 return force_expr_to_var_cost (expr, data->speed);
4481 }
4482
4483 /* Estimates cost of expressing address ADDR as var + symbol + offset. The
4484 value of offset is added to OFFSET, SYMBOL_PRESENT and VAR_PRESENT are set
4485 to false if the corresponding part is missing. DEPENDS_ON is a set of the
4486 invariants the computation depends on. */
4487
4488 static comp_cost
4489 split_address_cost (struct ivopts_data *data,
4490 tree addr, bool *symbol_present, bool *var_present,
4491 unsigned HOST_WIDE_INT *offset, bitmap *depends_on)
4492 {
4493 tree core;
4494 HOST_WIDE_INT bitsize;
4495 HOST_WIDE_INT bitpos;
4496 tree toffset;
4497 machine_mode mode;
4498 int unsignedp, reversep, volatilep;
4499
4500 core = get_inner_reference (addr, &bitsize, &bitpos, &toffset, &mode,
4501 &unsignedp, &reversep, &volatilep, false);
4502
4503 if (toffset != 0
4504 || bitpos % BITS_PER_UNIT != 0
4505 || reversep
4506 || TREE_CODE (core) != VAR_DECL)
4507 {
4508 *symbol_present = false;
4509 *var_present = true;
4510 fd_ivopts_data = data;
4511 if (depends_on)
4512 walk_tree (&addr, find_depends, depends_on, NULL);
4513
4514 return new_cost (target_spill_cost[data->speed], 0);
4515 }
4516
4517 *offset += bitpos / BITS_PER_UNIT;
4518 if (TREE_STATIC (core)
4519 || DECL_EXTERNAL (core))
4520 {
4521 *symbol_present = true;
4522 *var_present = false;
4523 return no_cost;
4524 }
4525
4526 *symbol_present = false;
4527 *var_present = true;
4528 return no_cost;
4529 }
4530
4531 /* Estimates cost of expressing difference of addresses E1 - E2 as
4532 var + symbol + offset. The value of offset is added to OFFSET,
4533 SYMBOL_PRESENT and VAR_PRESENT are set to false if the corresponding
4534 part is missing. DEPENDS_ON is a set of the invariants the computation
4535 depends on. */
4536
4537 static comp_cost
4538 ptr_difference_cost (struct ivopts_data *data,
4539 tree e1, tree e2, bool *symbol_present, bool *var_present,
4540 unsigned HOST_WIDE_INT *offset, bitmap *depends_on)
4541 {
4542 HOST_WIDE_INT diff = 0;
4543 aff_tree aff_e1, aff_e2;
4544 tree type;
4545
4546 gcc_assert (TREE_CODE (e1) == ADDR_EXPR);
4547
4548 if (ptr_difference_const (e1, e2, &diff))
4549 {
4550 *offset += diff;
4551 *symbol_present = false;
4552 *var_present = false;
4553 return no_cost;
4554 }
4555
4556 if (integer_zerop (e2))
4557 return split_address_cost (data, TREE_OPERAND (e1, 0),
4558 symbol_present, var_present, offset, depends_on);
4559
4560 *symbol_present = false;
4561 *var_present = true;
4562
4563 type = signed_type_for (TREE_TYPE (e1));
4564 tree_to_aff_combination (e1, type, &aff_e1);
4565 tree_to_aff_combination (e2, type, &aff_e2);
4566 aff_combination_scale (&aff_e2, -1);
4567 aff_combination_add (&aff_e1, &aff_e2);
4568
4569 return force_var_cost (data, aff_combination_to_tree (&aff_e1), depends_on);
4570 }
4571
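/* Example for the function above (editorial): with e1 == &a[4] and
   e2 == &a[0] for a 4-byte element type, ptr_difference_const succeeds and
   the whole difference folds into *OFFSET as 16, so no runtime computation
   is costed.  Only when the difference is not constant do we fall back to
   forcing the combined affine expression into a variable.  */
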
4572 /* Estimates cost of expressing difference E1 - E2 as
4573 var + symbol + offset. The value of offset is added to OFFSET,
4574 SYMBOL_PRESENT and VAR_PRESENT are set to false if the corresponding
4575 part is missing. DEPENDS_ON is a set of the invariants the computation
4576 depends on. */
4577
4578 static comp_cost
4579 difference_cost (struct ivopts_data *data,
4580 tree e1, tree e2, bool *symbol_present, bool *var_present,
4581 unsigned HOST_WIDE_INT *offset, bitmap *depends_on)
4582 {
4583 machine_mode mode = TYPE_MODE (TREE_TYPE (e1));
4584 unsigned HOST_WIDE_INT off1, off2;
4585 aff_tree aff_e1, aff_e2;
4586 tree type;
4587
4588 e1 = strip_offset (e1, &off1);
4589 e2 = strip_offset (e2, &off2);
4590 *offset += off1 - off2;
4591
4592 STRIP_NOPS (e1);
4593 STRIP_NOPS (e2);
4594
4595 if (TREE_CODE (e1) == ADDR_EXPR)
4596 return ptr_difference_cost (data, e1, e2, symbol_present, var_present,
4597 offset, depends_on);
4598 *symbol_present = false;
4599
4600 if (operand_equal_p (e1, e2, 0))
4601 {
4602 *var_present = false;
4603 return no_cost;
4604 }
4605
4606 *var_present = true;
4607
4608 if (integer_zerop (e2))
4609 return force_var_cost (data, e1, depends_on);
4610
4611 if (integer_zerop (e1))
4612 {
4613 comp_cost cost = force_var_cost (data, e2, depends_on);
4614 cost.cost += mult_by_coeff_cost (-1, mode, data->speed);
4615 return cost;
4616 }
4617
4618 type = signed_type_for (TREE_TYPE (e1));
4619 tree_to_aff_combination (e1, type, &aff_e1);
4620 tree_to_aff_combination (e2, type, &aff_e2);
4621 aff_combination_scale (&aff_e2, -1);
4622 aff_combination_add (&aff_e1, &aff_e2);
4623
4624 return force_var_cost (data, aff_combination_to_tree (&aff_e1), depends_on);
4625 }
4626
4627 /* Returns true if AFF1 and AFF2 are identical. */
4628
4629 static bool
4630 compare_aff_trees (aff_tree *aff1, aff_tree *aff2)
4631 {
4632 unsigned i;
4633
4634 if (aff1->n != aff2->n)
4635 return false;
4636
4637 for (i = 0; i < aff1->n; i++)
4638 {
4639 if (aff1->elts[i].coef != aff2->elts[i].coef)
4640 return false;
4641
4642 if (!operand_equal_p (aff1->elts[i].val, aff2->elts[i].val, 0))
4643 return false;
4644 }
4645 return true;
4646 }
4647
4648 /* Stores EXPR in DATA->inv_expr_tab, and assigns it an inv_expr_id. */
4649
4650 static int
4651 get_expr_id (struct ivopts_data *data, tree expr)
4652 {
4653 struct iv_inv_expr_ent ent;
4654 struct iv_inv_expr_ent **slot;
4655
4656 ent.expr = expr;
4657 ent.hash = iterative_hash_expr (expr, 0);
4658 slot = data->inv_expr_tab->find_slot (&ent, INSERT);
4659 if (*slot)
4660 return (*slot)->id;
4661
4662 *slot = XNEW (struct iv_inv_expr_ent);
4663 (*slot)->expr = expr;
4664 (*slot)->hash = ent.hash;
4665 (*slot)->id = data->inv_expr_id++;
4666 return (*slot)->id;
4667 }
4668
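/* Editorial note: the ids handed out above let the cost model treat equal
   loop-invariant expressions as a single entity -- e.g. two uses whose
   rewritten forms both need the (hypothetical) invariant `n * 4 + base'
   receive the same id, so the register-pressure estimate counts that
   expression only once rather than per use.  */
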
4669 /* Returns the pseudo expr id if expression UBASE - RATIO * CBASE
4670 requires a new compiler generated temporary. Returns -1 otherwise.
4671 ADDRESS_P is a flag indicating if the expression is for address
4672 computation. */
4673
4674 static int
4675 get_loop_invariant_expr_id (struct ivopts_data *data, tree ubase,
4676 tree cbase, HOST_WIDE_INT ratio,
4677 bool address_p)
4678 {
4679 aff_tree ubase_aff, cbase_aff;
4680 tree expr, ub, cb;
4681
4682 STRIP_NOPS (ubase);
4683 STRIP_NOPS (cbase);
4684 ub = ubase;
4685 cb = cbase;
4686
4687 if ((TREE_CODE (ubase) == INTEGER_CST)
4688 && (TREE_CODE (cbase) == INTEGER_CST))
4689 return -1;
4690
4691 /* Strips the constant part. */
4692 if (TREE_CODE (ubase) == PLUS_EXPR
4693 || TREE_CODE (ubase) == MINUS_EXPR
4694 || TREE_CODE (ubase) == POINTER_PLUS_EXPR)
4695 {
4696 if (TREE_CODE (TREE_OPERAND (ubase, 1)) == INTEGER_CST)
4697 ubase = TREE_OPERAND (ubase, 0);
4698 }
4699
4700 /* Strips the constant part. */
4701 if (TREE_CODE (cbase) == PLUS_EXPR
4702 || TREE_CODE (cbase) == MINUS_EXPR
4703 || TREE_CODE (cbase) == POINTER_PLUS_EXPR)
4704 {
4705 if (TREE_CODE (TREE_OPERAND (cbase, 1)) == INTEGER_CST)
4706 cbase = TREE_OPERAND (cbase, 0);
4707 }
4708
4709 if (address_p)
4710 {
4711 if (((TREE_CODE (ubase) == SSA_NAME)
4712 || (TREE_CODE (ubase) == ADDR_EXPR
4713 && is_gimple_min_invariant (ubase)))
4714 && (TREE_CODE (cbase) == INTEGER_CST))
4715 return -1;
4716
4717 if (((TREE_CODE (cbase) == SSA_NAME)
4718 || (TREE_CODE (cbase) == ADDR_EXPR
4719 && is_gimple_min_invariant (cbase)))
4720 && (TREE_CODE (ubase) == INTEGER_CST))
4721 return -1;
4722 }
4723
4724 if (ratio == 1)
4725 {
4726 if (operand_equal_p (ubase, cbase, 0))
4727 return -1;
4728
4729 if (TREE_CODE (ubase) == ADDR_EXPR
4730 && TREE_CODE (cbase) == ADDR_EXPR)
4731 {
4732 tree usym, csym;
4733
4734 usym = TREE_OPERAND (ubase, 0);
4735 csym = TREE_OPERAND (cbase, 0);
4736 if (TREE_CODE (usym) == ARRAY_REF)
4737 {
4738 tree ind = TREE_OPERAND (usym, 1);
4739 if (TREE_CODE (ind) == INTEGER_CST
4740 && tree_fits_shwi_p (ind)
4741 && tree_to_shwi (ind) == 0)
4742 usym = TREE_OPERAND (usym, 0);
4743 }
4744 if (TREE_CODE (csym) == ARRAY_REF)
4745 {
4746 tree ind = TREE_OPERAND (csym, 1);
4747 if (TREE_CODE (ind) == INTEGER_CST
4748 && tree_fits_shwi_p (ind)
4749 && tree_to_shwi (ind) == 0)
4750 csym = TREE_OPERAND (csym, 0);
4751 }
4752 if (operand_equal_p (usym, csym, 0))
4753 return -1;
4754 }
4755 /* Now do a more complex comparison. */
4756 tree_to_aff_combination (ubase, TREE_TYPE (ubase), &ubase_aff);
4757 tree_to_aff_combination (cbase, TREE_TYPE (cbase), &cbase_aff);
4758 if (compare_aff_trees (&ubase_aff, &cbase_aff))
4759 return -1;
4760 }
4761
4762 tree_to_aff_combination (ub, TREE_TYPE (ub), &ubase_aff);
4763 tree_to_aff_combination (cb, TREE_TYPE (cb), &cbase_aff);
4764
4765 aff_combination_scale (&cbase_aff, -1 * ratio);
4766 aff_combination_add (&ubase_aff, &cbase_aff);
4767 expr = aff_combination_to_tree (&ubase_aff);
4768 return get_expr_id (data, expr);
4769 }
4770
4771
4772
4773 /* Determines the cost of the computation by which USE is expressed
4774 from induction variable CAND. If ADDRESS_P is true, we just need
4775 to create an address from it, otherwise we want to get it into a
4776 register. A set of invariants we depend on is stored in
4777 DEPENDS_ON. AT is the statement at which the value is computed.
4778 If CAN_AUTOINC is nonnull, use it to record whether autoinc
4779 addressing is likely. */
4780
4781 static comp_cost
4782 get_computation_cost_at (struct ivopts_data *data,
4783 struct iv_use *use, struct iv_cand *cand,
4784 bool address_p, bitmap *depends_on, gimple *at,
4785 bool *can_autoinc,
4786 int *inv_expr_id)
4787 {
4788 tree ubase = use->iv->base, ustep = use->iv->step;
4789 tree cbase, cstep;
4790 tree utype = TREE_TYPE (ubase), ctype;
4791 unsigned HOST_WIDE_INT cstepi, offset = 0;
4792 HOST_WIDE_INT ratio, aratio;
4793 bool var_present, symbol_present, stmt_is_after_inc;
4794 comp_cost cost;
4795 widest_int rat;
4796 bool speed = optimize_bb_for_speed_p (gimple_bb (at));
4797 machine_mode mem_mode = (address_p
4798 ? TYPE_MODE (TREE_TYPE (*use->op_p))
4799 : VOIDmode);
4800
4801 if (depends_on)
4802 *depends_on = NULL;
4803
4804 /* Only consider real candidates. */
4805 if (!cand->iv)
4806 return infinite_cost;
4807
4808 cbase = cand->iv->base;
4809 cstep = cand->iv->step;
4810 ctype = TREE_TYPE (cbase);
4811
4812 if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
4813 {
4814 /* We do not have a precision to express the values of use. */
4815 return infinite_cost;
4816 }
4817
4818 if (address_p
4819 || (use->iv->base_object
4820 && cand->iv->base_object
4821 && POINTER_TYPE_P (TREE_TYPE (use->iv->base_object))
4822 && POINTER_TYPE_P (TREE_TYPE (cand->iv->base_object))))
4823 {
4824 /* Do not try to express the address of an object with a computation based
4825 on the address of a different object. This may cause problems in
4826 RTL-level alias analysis (which does not expect this to happen,
4827 as it is illegal in C), and would be unlikely to be useful
4828 anyway. */
4829 if (use->iv->base_object
4830 && cand->iv->base_object
4831 && !operand_equal_p (use->iv->base_object, cand->iv->base_object, 0))
4832 return infinite_cost;
4833 }
4834
4835 if (TYPE_PRECISION (utype) < TYPE_PRECISION (ctype))
4836 {
4837 /* TODO -- add direct handling of this case. */
4838 goto fallback;
4839 }
4840
4841 /* CSTEPI is removed from the offset in case the statement is after the
4842 increment. If the step is not constant, we use zero instead.
4843 This is a bit imprecise (there is the extra addition), but
4844 redundancy elimination is likely to transform the code so that
4845 it uses the value of the variable before the increment anyway,
4846 so this is not too unrealistic. */
4847 if (cst_and_fits_in_hwi (cstep))
4848 cstepi = int_cst_value (cstep);
4849 else
4850 cstepi = 0;
4851
4852 if (!constant_multiple_of (ustep, cstep, &rat))
4853 return infinite_cost;
4854
4855 if (wi::fits_shwi_p (rat))
4856 ratio = rat.to_shwi ();
4857 else
4858 return infinite_cost;
4859
4860 STRIP_NOPS (cbase);
4861 ctype = TREE_TYPE (cbase);
4862
4863 stmt_is_after_inc = stmt_after_increment (data->current_loop, cand, at);
4864
4865 /* use = ubase + ratio * (var - cbase). If either cbase is a constant
4866 or ratio == 1, it is better to handle this like
4867
4868 ubase - ratio * cbase + ratio * var
4869
4870 (this also holds in the case ratio == -1, TODO). */
4871
4872 if (cst_and_fits_in_hwi (cbase))
4873 {
4874 offset = - ratio * (unsigned HOST_WIDE_INT) int_cst_value (cbase);
4875 cost = difference_cost (data,
4876 ubase, build_int_cst (utype, 0),
4877 &symbol_present, &var_present, &offset,
4878 depends_on);
4879 cost.cost /= avg_loop_niter (data->current_loop);
4880 }
4881 else if (ratio == 1)
4882 {
4883 tree real_cbase = cbase;
4884
4885 /* Check to see if any adjustment is needed. */
4886 if (cstepi == 0 && stmt_is_after_inc)
4887 {
4888 aff_tree real_cbase_aff;
4889 aff_tree cstep_aff;
4890
4891 tree_to_aff_combination (cbase, TREE_TYPE (real_cbase),
4892 &real_cbase_aff);
4893 tree_to_aff_combination (cstep, TREE_TYPE (cstep), &cstep_aff);
4894
4895 aff_combination_add (&real_cbase_aff, &cstep_aff);
4896 real_cbase = aff_combination_to_tree (&real_cbase_aff);
4897 }
4898
4899 cost = difference_cost (data,
4900 ubase, real_cbase,
4901 &symbol_present, &var_present, &offset,
4902 depends_on);
4903 cost.cost /= avg_loop_niter (data->current_loop);
4904 }
4905 else if (address_p
4906 && !POINTER_TYPE_P (ctype)
4907 && multiplier_allowed_in_address_p
4908 (ratio, mem_mode,
4909 TYPE_ADDR_SPACE (TREE_TYPE (utype))))
4910 {
4911 if (cstepi == 0 && stmt_is_after_inc)
4912 {
4913 if (POINTER_TYPE_P (ctype))
4914 cbase = fold_build2 (POINTER_PLUS_EXPR, ctype, cbase, cstep);
4915 else
4916 cbase = fold_build2 (PLUS_EXPR, ctype, cbase, cstep);
4917 }
4918 cbase
4919 = fold_build2 (MULT_EXPR, ctype, cbase, build_int_cst (ctype, ratio));
4920 cost = difference_cost (data,
4921 ubase, cbase,
4922 &symbol_present, &var_present, &offset,
4923 depends_on);
4924 cost.cost /= avg_loop_niter (data->current_loop);
4925 }
4926 else
4927 {
4928 cost = force_var_cost (data, cbase, depends_on);
4929 cost = add_costs (cost,
4930 difference_cost (data,
4931 ubase, build_int_cst (utype, 0),
4932 &symbol_present, &var_present,
4933 &offset, depends_on));
4934 cost.cost /= avg_loop_niter (data->current_loop);
4935 cost.cost += add_cost (data->speed, TYPE_MODE (ctype));
4936 }
4937
4938 /* The set of invariants depended on by a sub-use has already been computed
4939 for the first use in the group. */
4940 if (use->sub_id)
4941 {
4942 cost.cost = 0;
4943 if (depends_on && *depends_on)
4944 bitmap_clear (*depends_on);
4945 }
4946 else if (inv_expr_id)
4947 {
4948 *inv_expr_id =
4949 get_loop_invariant_expr_id (data, ubase, cbase, ratio, address_p);
4950 /* Clear depends on. */
4951 if (*inv_expr_id != -1 && depends_on && *depends_on)
4952 bitmap_clear (*depends_on);
4953 }
4954
4955 /* If we are after the increment, the value of the candidate is higher by
4956 one iteration. */
4957 if (stmt_is_after_inc)
4958 offset -= ratio * cstepi;
4959
4960 /* Now the computation is in shape symbol + var1 + const + ratio * var2.
4961 (symbol/var1/const parts may be omitted). If we are looking for an
4962 address, find the cost of addressing this. */
4963 if (address_p)
4964 return add_costs (cost,
4965 get_address_cost (symbol_present, var_present,
4966 offset, ratio, cstepi,
4967 mem_mode,
4968 TYPE_ADDR_SPACE (TREE_TYPE (utype)),
4969 speed, stmt_is_after_inc,
4970 can_autoinc));
4971
4972 /* Otherwise estimate the costs for computing the expression. */
4973 if (!symbol_present && !var_present && !offset)
4974 {
4975 if (ratio != 1)
4976 cost.cost += mult_by_coeff_cost (ratio, TYPE_MODE (ctype), speed);
4977 return cost;
4978 }
4979
4980 /* Symbol + offset should be compile-time computable so consider that they
4981 are added once to the variable, if present. */
4982 if (var_present && (symbol_present || offset))
4983 cost.cost += adjust_setup_cost (data,
4984 add_cost (speed, TYPE_MODE (ctype)));
4985
4986 /* Having offset does not affect runtime cost in case it is added to
4987 symbol, but it increases complexity. */
4988 if (offset)
4989 cost.complexity++;
4990
4991 cost.cost += add_cost (speed, TYPE_MODE (ctype));
4992
4993 aratio = ratio > 0 ? ratio : -ratio;
4994 if (aratio != 1)
4995 cost.cost += mult_by_coeff_cost (aratio, TYPE_MODE (ctype), speed);
4996 return cost;
4997
4998 fallback:
4999 if (can_autoinc)
5000 *can_autoinc = false;
5001
5002 {
5003 /* Just get the expression, expand it and measure the cost. */
5004 tree comp = get_computation_at (data->current_loop, use, cand, at);
5005
5006 if (!comp)
5007 return infinite_cost;
5008
5009 if (address_p)
5010 comp = build_simple_mem_ref (comp);
5011
5012 return new_cost (computation_cost (comp, speed), 0);
5013 }
5014 }
5015
5016 /* Determines the cost of the computation by which USE is expressed
5017 from induction variable CAND. If ADDRESS_P is true, we just need
5018 to create an address from it, otherwise we want to get it into a
5019 register. A set of invariants we depend on is stored in
5020 DEPENDS_ON. If CAN_AUTOINC is nonnull, use it to record whether
5021 autoinc addressing is likely. */
5022
5023 static comp_cost
5024 get_computation_cost (struct ivopts_data *data,
5025 struct iv_use *use, struct iv_cand *cand,
5026 bool address_p, bitmap *depends_on,
5027 bool *can_autoinc, int *inv_expr_id)
5028 {
5029 return get_computation_cost_at (data,
5030 use, cand, address_p, depends_on, use->stmt,
5031 can_autoinc, inv_expr_id);
5032 }
5033
5034 /* Determines cost of basing replacement of USE on CAND in a generic
5035 expression. */
5036
5037 static bool
5038 determine_use_iv_cost_generic (struct ivopts_data *data,
5039 struct iv_use *use, struct iv_cand *cand)
5040 {
5041 bitmap depends_on;
5042 comp_cost cost;
5043 int inv_expr_id = -1;
5044
5045 /* The simple case first -- if we need to express value of the preserved
5046 original biv, the cost is 0. This also prevents us from counting the
5047 cost of increment twice -- once at this use and once in the cost of
5048 the candidate. */
5049 if (cand->pos == IP_ORIGINAL
5050 && cand->incremented_at == use->stmt)
5051 {
5052 set_use_iv_cost (data, use, cand, no_cost, NULL, NULL_TREE,
5053 ERROR_MARK, -1);
5054 return true;
5055 }
5056
5057 cost = get_computation_cost (data, use, cand, false, &depends_on,
5058 NULL, &inv_expr_id);
5059
5060 set_use_iv_cost (data, use, cand, cost, depends_on, NULL_TREE, ERROR_MARK,
5061 inv_expr_id);
5062
5063 return !infinite_cost_p (cost);
5064 }
5065
5066 /* Determines cost of basing replacement of USE on CAND in an address. */
5067
5068 static bool
5069 determine_use_iv_cost_address (struct ivopts_data *data,
5070 struct iv_use *use, struct iv_cand *cand)
5071 {
5072 bitmap depends_on;
5073 bool can_autoinc;
5074 int inv_expr_id = -1;
5075 struct iv_use *sub_use;
5076 comp_cost sub_cost;
5077 comp_cost cost = get_computation_cost (data, use, cand, true, &depends_on,
5078 &can_autoinc, &inv_expr_id);
5079
5080 if (cand->ainc_use == use)
5081 {
5082 if (can_autoinc)
5083 cost.cost -= cand->cost_step;
5084 /* If we generated the candidate solely for exploiting autoincrement
5085 opportunities, and it turns out it can't be used, set the cost to
5086 infinity to make sure we ignore it. */
5087 else if (cand->pos == IP_AFTER_USE || cand->pos == IP_BEFORE_USE)
5088 cost = infinite_cost;
5089 }
5090 for (sub_use = use->next;
5091 sub_use && !infinite_cost_p (cost);
5092 sub_use = sub_use->next)
5093 {
5094 sub_cost = get_computation_cost (data, sub_use, cand, true, NULL,
5095 &can_autoinc, NULL);
5096 cost = add_costs (cost, sub_cost);
5097 }
5098
5099 set_use_iv_cost (data, use, cand, cost, depends_on, NULL_TREE, ERROR_MARK,
5100 inv_expr_id);
5101
5102 return !infinite_cost_p (cost);
5103 }
5104
5105 /* Computes value of candidate CAND at position AT in iteration NITER, and
5106 stores it to VAL. */
5107
5108 static void
5109 cand_value_at (struct loop *loop, struct iv_cand *cand, gimple *at, tree niter,
5110 aff_tree *val)
5111 {
5112 aff_tree step, delta, nit;
5113 struct iv *iv = cand->iv;
5114 tree type = TREE_TYPE (iv->base);
5115 tree steptype = type;
5116 if (POINTER_TYPE_P (type))
5117 steptype = sizetype;
5118 steptype = unsigned_type_for (type);
5119
5120 tree_to_aff_combination (iv->step, TREE_TYPE (iv->step), &step);
5121 aff_combination_convert (&step, steptype);
5122 tree_to_aff_combination (niter, TREE_TYPE (niter), &nit);
5123 aff_combination_convert (&nit, steptype);
5124 aff_combination_mult (&nit, &step, &delta);
5125 if (stmt_after_increment (loop, cand, at))
5126 aff_combination_add (&delta, &step);
5127
5128 tree_to_aff_combination (iv->base, type, val);
5129 if (!POINTER_TYPE_P (type))
5130 aff_combination_convert (val, steptype);
5131 aff_combination_add (val, &delta);
5132 }
5133
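/* In effect (editorial summary of the function above), working in an
   unsigned type to sidestep overflow:

     VAL = base + NITER * step        for a use before the increment,
     VAL = base + (NITER + 1) * step  for a use after it,

   expressed as an affine combination rather than folded trees.  */
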
5134 /* Returns period of induction variable iv. */
5135
5136 static tree
5137 iv_period (struct iv *iv)
5138 {
5139 tree step = iv->step, period, type;
5140 tree pow2div;
5141
5142 gcc_assert (step && TREE_CODE (step) == INTEGER_CST);
5143
5144 type = unsigned_type_for (TREE_TYPE (step));
5145 /* Period of the iv is lcm (step, type_range) / step - 1,
5146 i.e., N * type_range / step - 1. Since type_range is a power
5147 of two, N == step >> num_of_ending_zeros_binary (step),
5148 so the final result is
5149
5150 (type_range >> num_of_ending_zeros_binary (step)) - 1.
5151
5152 */
5153 pow2div = num_ending_zeros (step);
5154
5155 period = build_low_bits_mask (type,
5156 (TYPE_PRECISION (type)
5157 - tree_to_uhwi (pow2div)));
5158
5159 return period;
5160 }
5161
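/* Worked example (editorial, hypothetical iv): for a 32-bit unsigned iv with
   step 4, num_ending_zeros (step) is 2, so the period is
   (1 << (32 - 2)) - 1 == 0x3fffffff -- the number of increments the iv can
   make before its value sequence starts repeating.  */
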
5162 /* Returns the comparison operator used when eliminating the iv USE. */
5163
5164 static enum tree_code
5165 iv_elimination_compare (struct ivopts_data *data, struct iv_use *use)
5166 {
5167 struct loop *loop = data->current_loop;
5168 basic_block ex_bb;
5169 edge exit;
5170
5171 ex_bb = gimple_bb (use->stmt);
5172 exit = EDGE_SUCC (ex_bb, 0);
5173 if (flow_bb_inside_loop_p (loop, exit->dest))
5174 exit = EDGE_SUCC (ex_bb, 1);
5175
5176 return (exit->flags & EDGE_TRUE_VALUE ? EQ_EXPR : NE_EXPR);
5177 }
5178
5179 /* Returns true if we can prove that BASE - OFFSET does not overflow. For now,
5180 we only detect the situation that BASE = SOMETHING + OFFSET, where the
5181 calculation is performed in a non-wrapping type.
5182
5183 TODO: More generally, we could test for the situation that
5184 BASE = SOMETHING + OFFSET' and OFFSET is between OFFSET' and zero.
5185 This would require knowing the sign of OFFSET. */
5186
5187 static bool
5188 difference_cannot_overflow_p (struct ivopts_data *data, tree base, tree offset)
5189 {
5190 enum tree_code code;
5191 tree e1, e2;
5192 aff_tree aff_e1, aff_e2, aff_offset;
5193
5194 if (!nowrap_type_p (TREE_TYPE (base)))
5195 return false;
5196
5197 base = expand_simple_operations (base);
5198
5199 if (TREE_CODE (base) == SSA_NAME)
5200 {
5201 gimple *stmt = SSA_NAME_DEF_STMT (base);
5202
5203 if (gimple_code (stmt) != GIMPLE_ASSIGN)
5204 return false;
5205
5206 code = gimple_assign_rhs_code (stmt);
5207 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
5208 return false;
5209
5210 e1 = gimple_assign_rhs1 (stmt);
5211 e2 = gimple_assign_rhs2 (stmt);
5212 }
5213 else
5214 {
5215 code = TREE_CODE (base);
5216 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
5217 return false;
5218 e1 = TREE_OPERAND (base, 0);
5219 e2 = TREE_OPERAND (base, 1);
5220 }
5221
5222 /* Use affine expansion as deeper inspection to prove the equality. */
5223 tree_to_aff_combination_expand (e2, TREE_TYPE (e2),
5224 &aff_e2, &data->name_expansion_cache);
5225 tree_to_aff_combination_expand (offset, TREE_TYPE (offset),
5226 &aff_offset, &data->name_expansion_cache);
5227 aff_combination_scale (&aff_offset, -1);
5228 switch (code)
5229 {
5230 case PLUS_EXPR:
5231 aff_combination_add (&aff_e2, &aff_offset);
5232 if (aff_combination_zero_p (&aff_e2))
5233 return true;
5234
5235 tree_to_aff_combination_expand (e1, TREE_TYPE (e1),
5236 &aff_e1, &data->name_expansion_cache);
5237 aff_combination_add (&aff_e1, &aff_offset);
5238 return aff_combination_zero_p (&aff_e1);
5239
5240 case POINTER_PLUS_EXPR:
5241 aff_combination_add (&aff_e2, &aff_offset);
5242 return aff_combination_zero_p (&aff_e2);
5243
5244 default:
5245 return false;
5246 }
5247 }
5248
5249 /* Tries to replace loop exit by one formulated in terms of a LT_EXPR
5250 comparison with CAND. NITER describes the number of iterations of
5251 the loops. If successful, the comparison in COMP_P is altered accordingly.
5252
5253 We aim to handle the following situation:
5254
5255 sometype *base, *p;
5256 int a, b, i;
5257
5258 i = a;
5259 p = p_0 = base + a;
5260
5261 do
5262 {
5263 bla (*p);
5264 p++;
5265 i++;
5266 }
5267 while (i < b);
5268
5269 Here, the number of iterations of the loop is (a + 1 > b) ? 0 : b - a - 1.
5270 We aim to optimize this to
5271
5272 p = p_0 = base + a;
5273 do
5274 {
5275 bla (*p);
5276 p++;
5277 }
5278 while (p < p_0 - a + b);
5279
5280 This preserves correctness, since the pointer arithmetic does not
5281 overflow. More precisely:
5282
5283 1) if a + 1 <= b, then p_0 - a + b is the final value of p, hence there is no
5284 overflow in computing it or the values of p.
5285 2) if a + 1 > b, then we need to verify that the expression p_0 - a does not
5286 overflow. To prove this, we use the fact that p_0 = base + a. */
5287
5288 static bool
5289 iv_elimination_compare_lt (struct ivopts_data *data,
5290 struct iv_cand *cand, enum tree_code *comp_p,
5291 struct tree_niter_desc *niter)
5292 {
5293 tree cand_type, a, b, mbz, nit_type = TREE_TYPE (niter->niter), offset;
5294 struct aff_tree nit, tmpa, tmpb;
5295 enum tree_code comp;
5296 HOST_WIDE_INT step;
5297
5298 /* We need to know that the candidate induction variable does not overflow.
5299 While more complex analysis may be used to prove this, for now just
5300 check that the variable appears in the original program and that it
5301 is computed in a type that guarantees no overflows. */
5302 cand_type = TREE_TYPE (cand->iv->base);
5303 if (cand->pos != IP_ORIGINAL || !nowrap_type_p (cand_type))
5304 return false;
5305
5306 /* Make sure that the loop iterates till the loop bound is hit, as otherwise
5307 the calculation of the BOUND could overflow, making the comparison
5308 invalid. */
5309 if (!data->loop_single_exit_p)
5310 return false;
5311
5312 /* We need to be able to decide whether candidate is increasing or decreasing
5313 in order to choose the right comparison operator. */
5314 if (!cst_and_fits_in_hwi (cand->iv->step))
5315 return false;
5316 step = int_cst_value (cand->iv->step);
5317
5318 /* Check that the number of iterations matches the expected pattern:
5319 a + 1 > b ? 0 : b - a - 1. */
5320 mbz = niter->may_be_zero;
5321 if (TREE_CODE (mbz) == GT_EXPR)
5322 {
5323 /* Handle a + 1 > b. */
5324 tree op0 = TREE_OPERAND (mbz, 0);
5325 if (TREE_CODE (op0) == PLUS_EXPR && integer_onep (TREE_OPERAND (op0, 1)))
5326 {
5327 a = TREE_OPERAND (op0, 0);
5328 b = TREE_OPERAND (mbz, 1);
5329 }
5330 else
5331 return false;
5332 }
5333 else if (TREE_CODE (mbz) == LT_EXPR)
5334 {
5335 tree op1 = TREE_OPERAND (mbz, 1);
5336
5337 /* Handle b < a + 1. */
5338 if (TREE_CODE (op1) == PLUS_EXPR && integer_onep (TREE_OPERAND (op1, 1)))
5339 {
5340 a = TREE_OPERAND (op1, 0);
5341 b = TREE_OPERAND (mbz, 0);
5342 }
5343 else
5344 return false;
5345 }
5346 else
5347 return false;
5348
5349 /* Expected number of iterations is B - A - 1. Check that it matches
5350 the actual number, i.e., that B - A - NITER = 1. */
5351 tree_to_aff_combination (niter->niter, nit_type, &nit);
5352 tree_to_aff_combination (fold_convert (nit_type, a), nit_type, &tmpa);
5353 tree_to_aff_combination (fold_convert (nit_type, b), nit_type, &tmpb);
5354 aff_combination_scale (&nit, -1);
5355 aff_combination_scale (&tmpa, -1);
5356 aff_combination_add (&tmpb, &tmpa);
5357 aff_combination_add (&tmpb, &nit);
5358 if (tmpb.n != 0 || tmpb.offset != 1)
5359 return false;
5360
5361 /* Finally, check that CAND->IV->BASE - CAND->IV->STEP * A does not
5362 overflow. */
5363 offset = fold_build2 (MULT_EXPR, TREE_TYPE (cand->iv->step),
5364 cand->iv->step,
5365 fold_convert (TREE_TYPE (cand->iv->step), a));
5366 if (!difference_cannot_overflow_p (data, cand->iv->base, offset))
5367 return false;
5368
5369 /* Determine the new comparison operator. */
5370 comp = step < 0 ? GT_EXPR : LT_EXPR;
5371 if (*comp_p == NE_EXPR)
5372 *comp_p = comp;
5373 else if (*comp_p == EQ_EXPR)
5374 *comp_p = invert_tree_comparison (comp, false);
5375 else
5376 gcc_unreachable ();
5377
5378 return true;
5379 }
5380
5381 /* Check whether it is possible to express the condition in USE by comparison
5382 of candidate CAND. If so, store the value compared with to BOUND, and the
5383 comparison operator to COMP. */
5384
5385 static bool
5386 may_eliminate_iv (struct ivopts_data *data,
5387 struct iv_use *use, struct iv_cand *cand, tree *bound,
5388 enum tree_code *comp)
5389 {
5390 basic_block ex_bb;
5391 edge exit;
5392 tree period;
5393 struct loop *loop = data->current_loop;
5394 aff_tree bnd;
5395 struct tree_niter_desc *desc = NULL;
5396
5397 if (TREE_CODE (cand->iv->step) != INTEGER_CST)
5398 return false;
5399
5400 /* For now this works only for exits that dominate the loop latch.
5401 TODO: extend to other conditions inside the loop body. */
5402 ex_bb = gimple_bb (use->stmt);
5403 if (use->stmt != last_stmt (ex_bb)
5404 || gimple_code (use->stmt) != GIMPLE_COND
5405 || !dominated_by_p (CDI_DOMINATORS, loop->latch, ex_bb))
5406 return false;
5407
5408 exit = EDGE_SUCC (ex_bb, 0);
5409 if (flow_bb_inside_loop_p (loop, exit->dest))
5410 exit = EDGE_SUCC (ex_bb, 1);
5411 if (flow_bb_inside_loop_p (loop, exit->dest))
5412 return false;
5413
5414 desc = niter_for_exit (data, exit);
5415 if (!desc)
5416 return false;
5417
5418 /* Determine whether we can use the variable to test the exit condition.
5419 This is the case iff the period of the induction variable is greater
5420 than the number of iterations for which the exit condition is true. */
5421 period = iv_period (cand->iv);
5422
5423 /* If the number of iterations is constant, compare against it directly. */
5424 if (TREE_CODE (desc->niter) == INTEGER_CST)
5425 {
5426 /* See cand_value_at. */
5427 if (stmt_after_increment (loop, cand, use->stmt))
5428 {
5429 if (!tree_int_cst_lt (desc->niter, period))
5430 return false;
5431 }
5432 else
5433 {
5434 if (tree_int_cst_lt (period, desc->niter))
5435 return false;
5436 }
5437 }
5438
5439 /* If not, and if this is the only possible exit of the loop, see whether
5440 we can get a conservative estimate on the number of iterations of the
5441 entire loop and compare against that instead. */
5442 else
5443 {
5444 widest_int period_value, max_niter;
5445
5446 max_niter = desc->max;
5447 if (stmt_after_increment (loop, cand, use->stmt))
5448 max_niter += 1;
5449 period_value = wi::to_widest (period);
5450 if (wi::gtu_p (max_niter, period_value))
5451 {
5452 /* See if we can take advantage of inferred loop bound information. */
5453 if (data->loop_single_exit_p)
5454 {
5455 if (!max_loop_iterations (loop, &max_niter))
5456 return false;
5457 /* The loop bound is already adjusted by adding 1. */
5458 if (wi::gtu_p (max_niter, period_value))
5459 return false;
5460 }
5461 else
5462 return false;
5463 }
5464 }
5465
5466 cand_value_at (loop, cand, use->stmt, desc->niter, &bnd);
5467
5468 *bound = fold_convert (TREE_TYPE (cand->iv->base),
5469 aff_combination_to_tree (&bnd));
5470 *comp = iv_elimination_compare (data, use);
5471
5472 /* It is unlikely that computing the number of iterations using division
5473 would be more profitable than keeping the original induction variable. */
5474 if (expression_expensive_p (*bound))
5475 return false;
5476
5477 /* Sometimes it is possible to handle the situation that the number of
5478 iterations may be zero unless additional assumptions hold, by using <
5479 instead of != in the exit condition.
5480
5481 TODO: we could also calculate the value MAY_BE_ZERO ? 0 : NITER and
5482 base the exit condition on it. However, that is often too
5483 expensive. */
5484 if (!integer_zerop (desc->may_be_zero))
5485 return iv_elimination_compare_lt (data, cand, comp, desc);
5486
5487 return true;
5488 }
5489
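/* Example (editorial, hypothetical loop): consider eliminating the exit test
   `i != n' in favour of an 8-bit candidate with step 1, whose period is 255.
   If the exit's iteration count is known to be at most 100, the candidate is
   usable; if it may reach 300, the candidate would wrap before the computed
   bound is hit, so the elimination above is refused.  */
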
5490 /* Calculates the cost of BOUND, if it is a PARM_DECL. A PARM_DECL must
5491 be copied if it is used in the loop body and DATA->body_includes_call. */
5492
5493 static int
5494 parm_decl_cost (struct ivopts_data *data, tree bound)
5495 {
5496 tree sbound = bound;
5497 STRIP_NOPS (sbound);
5498
5499 if (TREE_CODE (sbound) == SSA_NAME
5500 && SSA_NAME_IS_DEFAULT_DEF (sbound)
5501 && TREE_CODE (SSA_NAME_VAR (sbound)) == PARM_DECL
5502 && data->body_includes_call)
5503 return COSTS_N_INSNS (1);
5504
5505 return 0;
5506 }
5507
5508 /* Determines cost of basing replacement of USE on CAND in a condition. */
5509
5510 static bool
5511 determine_use_iv_cost_condition (struct ivopts_data *data,
5512 struct iv_use *use, struct iv_cand *cand)
5513 {
5514 tree bound = NULL_TREE;
5515 struct iv *cmp_iv;
5516 bitmap depends_on_elim = NULL, depends_on_express = NULL, depends_on;
5517 comp_cost elim_cost, express_cost, cost, bound_cost;
5518 bool ok;
5519 int elim_inv_expr_id = -1, express_inv_expr_id = -1, inv_expr_id;
5520 tree *control_var, *bound_cst;
5521 enum tree_code comp = ERROR_MARK;
5522
5523 /* Only consider real candidates. */
5524 if (!cand->iv)
5525 {
5526 set_use_iv_cost (data, use, cand, infinite_cost, NULL, NULL_TREE,
5527 ERROR_MARK, -1);
5528 return false;
5529 }
5530
5531 /* Try iv elimination. */
5532 if (may_eliminate_iv (data, use, cand, &bound, &comp))
5533 {
5534 elim_cost = force_var_cost (data, bound, &depends_on_elim);
5535 if (elim_cost.cost == 0)
5536 elim_cost.cost = parm_decl_cost (data, bound);
5537 else if (TREE_CODE (bound) == INTEGER_CST)
5538 elim_cost.cost = 0;
5539 /* If we replace a loop condition 'i < n' with 'p < base + n',
5540 depends_on_elim will have 'base' and 'n' set, which implies
5541 that both 'base' and 'n' will be live during the loop. More likely,
5542 'base + n' will be loop invariant, resulting in only one live value
5543 during the loop. So in that case we clear depends_on_elim and set
5544 elim_inv_expr_id instead. */
5545 if (depends_on_elim && bitmap_count_bits (depends_on_elim) > 1)
5546 {
5547 elim_inv_expr_id = get_expr_id (data, bound);
5548 bitmap_clear (depends_on_elim);
5549 }
5550 /* The bound is a loop invariant, so it will be only computed
5551 once. */
5552 elim_cost.cost = adjust_setup_cost (data, elim_cost.cost);
5553 }
5554 else
5555 elim_cost = infinite_cost;
5556
5557 /* Try expressing the original giv. If it is compared with an invariant,
5558 note that we cannot get rid of it. */
5559 ok = extract_cond_operands (data, use->stmt, &control_var, &bound_cst,
5560 NULL, &cmp_iv);
5561 gcc_assert (ok);
5562
5563 /* When the condition is a comparison of the candidate IV against
5564 zero, prefer this IV.
5565
5566 TODO: The constant that we're subtracting from the cost should
5567 be target-dependent. This information should be added to the
5568 target costs for each backend. */
5569 if (!infinite_cost_p (elim_cost) /* Do not try to decrease infinite! */
5570 && integer_zerop (*bound_cst)
5571 && (operand_equal_p (*control_var, cand->var_after, 0)
5572 || operand_equal_p (*control_var, cand->var_before, 0)))
5573 elim_cost.cost -= 1;
5574
5575 express_cost = get_computation_cost (data, use, cand, false,
5576 &depends_on_express, NULL,
5577 &express_inv_expr_id);
5578 fd_ivopts_data = data;
5579 walk_tree (&cmp_iv->base, find_depends, &depends_on_express, NULL);
5580
5581 /* Count the cost of the original bound as well. */
5582 bound_cost = force_var_cost (data, *bound_cst, NULL);
5583 if (bound_cost.cost == 0)
5584 bound_cost.cost = parm_decl_cost (data, *bound_cst);
5585 else if (TREE_CODE (*bound_cst) == INTEGER_CST)
5586 bound_cost.cost = 0;
5587 express_cost.cost += bound_cost.cost;
5588
5589 /* Choose the better approach, preferring the eliminated IV. */
5590 if (compare_costs (elim_cost, express_cost) <= 0)
5591 {
5592 cost = elim_cost;
5593 depends_on = depends_on_elim;
5594 depends_on_elim = NULL;
5595 inv_expr_id = elim_inv_expr_id;
5596 }
5597 else
5598 {
5599 cost = express_cost;
5600 depends_on = depends_on_express;
5601 depends_on_express = NULL;
5602 bound = NULL_TREE;
5603 comp = ERROR_MARK;
5604 inv_expr_id = express_inv_expr_id;
5605 }
5606
5607 set_use_iv_cost (data, use, cand, cost, depends_on, bound, comp, inv_expr_id);
5608
5609 if (depends_on_elim)
5610 BITMAP_FREE (depends_on_elim);
5611 if (depends_on_express)
5612 BITMAP_FREE (depends_on_express);
5613
5614 return !infinite_cost_p (cost);
5615 }
5616
5617 /* Determines cost of basing replacement of USE on CAND. Returns false
5618 if USE cannot be based on CAND. */
5619
5620 static bool
5621 determine_use_iv_cost (struct ivopts_data *data,
5622 struct iv_use *use, struct iv_cand *cand)
5623 {
5624 switch (use->type)
5625 {
5626 case USE_NONLINEAR_EXPR:
5627 return determine_use_iv_cost_generic (data, use, cand);
5628
5629 case USE_ADDRESS:
5630 return determine_use_iv_cost_address (data, use, cand);
5631
5632 case USE_COMPARE:
5633 return determine_use_iv_cost_condition (data, use, cand);
5634
5635 default:
5636 gcc_unreachable ();
5637 }
5638 }
5639
5640 /* Return true if get_computation_cost indicates that autoincrement is
5641 a possibility for the pair of USE and CAND, false otherwise. */
5642
5643 static bool
5644 autoinc_possible_for_pair (struct ivopts_data *data, struct iv_use *use,
5645 struct iv_cand *cand)
5646 {
5647 bitmap depends_on;
5648 bool can_autoinc;
5649 comp_cost cost;
5650
5651 if (use->type != USE_ADDRESS)
5652 return false;
5653
5654 cost = get_computation_cost (data, use, cand, true, &depends_on,
5655 &can_autoinc, NULL);
5656
5657 BITMAP_FREE (depends_on);
5658
5659 return !infinite_cost_p (cost) && can_autoinc;
5660 }
5661
5662 /* Examine IP_ORIGINAL candidates to see if they are incremented next to a
5663 use that allows autoincrement, and set their AINC_USE if possible. */
5664
5665 static void
5666 set_autoinc_for_original_candidates (struct ivopts_data *data)
5667 {
5668 unsigned i, j;
5669
5670 for (i = 0; i < n_iv_cands (data); i++)
5671 {
5672 struct iv_cand *cand = iv_cand (data, i);
5673 struct iv_use *closest_before = NULL;
5674 struct iv_use *closest_after = NULL;
5675 if (cand->pos != IP_ORIGINAL)
5676 continue;
5677
5678 for (j = 0; j < n_iv_uses (data); j++)
5679 {
5680 struct iv_use *use = iv_use (data, j);
5681 unsigned uid = gimple_uid (use->stmt);
5682
5683 if (gimple_bb (use->stmt) != gimple_bb (cand->incremented_at))
5684 continue;
5685
5686 if (uid < gimple_uid (cand->incremented_at)
5687 && (closest_before == NULL
5688 || uid > gimple_uid (closest_before->stmt)))
5689 closest_before = use;
5690
5691 if (uid > gimple_uid (cand->incremented_at)
5692 && (closest_after == NULL
5693 || uid < gimple_uid (closest_after->stmt)))
5694 closest_after = use;
5695 }
5696
5697 if (closest_before != NULL
5698 && autoinc_possible_for_pair (data, closest_before, cand))
5699 cand->ainc_use = closest_before;
5700 else if (closest_after != NULL
5701 && autoinc_possible_for_pair (data, closest_after, cand))
5702 cand->ainc_use = closest_after;
5703 }
5704 }
5705
5706 /* Finds the candidates for the induction variables. */
5707
5708 static void
5709 find_iv_candidates (struct ivopts_data *data)
5710 {
5711 /* Add commonly used ivs. */
5712 add_standard_iv_candidates (data);
5713
5714 /* Add old induction variables. */
5715 add_iv_candidate_for_bivs (data);
5716
5717 /* Add induction variables derived from uses. */
5718 add_iv_candidate_for_uses (data);
5719
5720 set_autoinc_for_original_candidates (data);
5721
5722 /* Record the important candidates. */
5723 record_important_candidates (data);
5724 }
5725
5726 /* Determines costs of basing the use of the iv on an iv candidate. */
5727
5728 static void
5729 determine_use_iv_costs (struct ivopts_data *data)
5730 {
5731 unsigned i, j;
5732 struct iv_use *use;
5733 struct iv_cand *cand;
5734 bitmap to_clear = BITMAP_ALLOC (NULL);
5735
5736 alloc_use_cost_map (data);
5737
5738 for (i = 0; i < n_iv_uses (data); i++)
5739 {
5740 use = iv_use (data, i);
5741
5742 if (data->consider_all_candidates)
5743 {
5744 for (j = 0; j < n_iv_cands (data); j++)
5745 {
5746 cand = iv_cand (data, j);
5747 determine_use_iv_cost (data, use, cand);
5748 }
5749 }
5750 else
5751 {
5752 bitmap_iterator bi;
5753
5754 EXECUTE_IF_SET_IN_BITMAP (use->related_cands, 0, j, bi)
5755 {
5756 cand = iv_cand (data, j);
5757 if (!determine_use_iv_cost (data, use, cand))
5758 bitmap_set_bit (to_clear, j);
5759 }
5760
5761 /* Remove the candidates for which the cost is infinite from
5762 the list of related candidates. */
5763 bitmap_and_compl_into (use->related_cands, to_clear);
5764 bitmap_clear (to_clear);
5765 }
5766 }
5767
5768 BITMAP_FREE (to_clear);
5769
5770 if (dump_file && (dump_flags & TDF_DETAILS))
5771 {
5772 fprintf (dump_file, "Use-candidate costs:\n");
5773
5774 for (i = 0; i < n_iv_uses (data); i++)
5775 {
5776 use = iv_use (data, i);
5777
5778 fprintf (dump_file, "Use %d:\n", i);
5779 fprintf (dump_file, " cand\tcost\tcompl.\tdepends on\n");
5780 for (j = 0; j < use->n_map_members; j++)
5781 {
5782 if (!use->cost_map[j].cand
5783 || infinite_cost_p (use->cost_map[j].cost))
5784 continue;
5785
5786 fprintf (dump_file, " %d\t%d\t%d\t",
5787 use->cost_map[j].cand->id,
5788 use->cost_map[j].cost.cost,
5789 use->cost_map[j].cost.complexity);
5790 if (use->cost_map[j].depends_on)
5791 bitmap_print (dump_file,
5792 use->cost_map[j].depends_on, "","");
5793 if (use->cost_map[j].inv_expr_id != -1)
5794 fprintf (dump_file, " inv_expr:%d", use->cost_map[j].inv_expr_id);
5795 fprintf (dump_file, "\n");
5796 }
5797
5798 fprintf (dump_file, "\n");
5799 }
5800 fprintf (dump_file, "\n");
5801 }
5802 }
5803
5804 /* Determines cost of the candidate CAND. */
5805
5806 static void
5807 determine_iv_cost (struct ivopts_data *data, struct iv_cand *cand)
5808 {
5809 comp_cost cost_base;
5810 unsigned cost, cost_step;
5811 tree base;
5812
5813 if (!cand->iv)
5814 {
5815 cand->cost = 0;
5816 return;
5817 }
5818
5819 /* There are two costs associated with the candidate -- its increment
5820 and its initialization. The second is almost negligible for any loop
5821 that rolls enough, so we take it into account only marginally. */
5822
5823 base = cand->iv->base;
5824 cost_base = force_var_cost (data, base, NULL);
5825 /* It is exceptional for the iv register to happen to be initialized with
5826 the proper value at no cost. In general, there will at least be a regcopy
5827 or a const set. */
5828 if (cost_base.cost == 0)
5829 cost_base.cost = COSTS_N_INSNS (1);
5830 cost_step = add_cost (data->speed, TYPE_MODE (TREE_TYPE (base)));
5831
5832 cost = cost_step + adjust_setup_cost (data, cost_base.cost);
5833
5834 /* Prefer the original ivs unless we may gain something by replacing them.
5835 The reason is to make debugging simpler; this is not relevant for
5836 artificial ivs created by other optimization passes. */
5837 if (cand->pos != IP_ORIGINAL
5838 || !SSA_NAME_VAR (cand->var_before)
5839 || DECL_ARTIFICIAL (SSA_NAME_VAR (cand->var_before)))
5840 cost++;
5841
5842 /* Prefer not to insert statements into the latch unless there are some
5843 there already (so that we do not create unnecessary jumps). */
5844 if (cand->pos == IP_END
5845 && empty_block_p (ip_end_pos (data->current_loop)))
5846 cost++;
5847
5848 cand->cost = cost;
5849 cand->cost_step = cost_step;
5850 }
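
/* Worked example with hypothetical numbers (not computed anywhere in this
   file): if adding the step costs COSTS_N_INSNS (1) = 4 and forcing the
   base into a register costs 8, then for a loop expected to iterate many
   times adjust_setup_cost scales the one-off initialization down, giving
   roughly

     cost = cost_step + adjusted setup  =  4 + 1  =  5.

   The cost++ adjustments above only break ties: they nudge the choice
   towards the user-visible original ivs and away from placing a new
   increment into an otherwise empty latch block.  */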
5851
5852 /* Determines costs of computation of the candidates. */
5853
5854 static void
5855 determine_iv_costs (struct ivopts_data *data)
5856 {
5857 unsigned i;
5858
5859 if (dump_file && (dump_flags & TDF_DETAILS))
5860 {
5861 fprintf (dump_file, "Candidate costs:\n");
5862 fprintf (dump_file, " cand\tcost\n");
5863 }
5864
5865 for (i = 0; i < n_iv_cands (data); i++)
5866 {
5867 struct iv_cand *cand = iv_cand (data, i);
5868
5869 determine_iv_cost (data, cand);
5870
5871 if (dump_file && (dump_flags & TDF_DETAILS))
5872 fprintf (dump_file, " %d\t%d\n", i, cand->cost);
5873 }
5874
5875 if (dump_file && (dump_flags & TDF_DETAILS))
5876 fprintf (dump_file, "\n");
5877 }
5878
5879 /* Calculates cost for having SIZE induction variables. */
5880
5881 static unsigned
5882 ivopts_global_cost_for_size (struct ivopts_data *data, unsigned size)
5883 {
5884 /* We add size to the cost, so that we prefer eliminating ivs
5885 if possible. */
5886 return size + estimate_reg_pressure_cost (size, data->regs_used, data->speed,
5887 data->body_includes_call);
5888 }
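
/* For instance (hypothetical figures): while the loop still fits
   comfortably in the register file, the estimate_reg_pressure_cost term
   is zero and the value above is simply SIZE, so a set of 3 ivs costs 3
   and a set of 5 costs 5.  Only when SIZE plus the registers already
   consumed by the loop body (data->regs_used) approaches or exceeds the
   available registers do the per-register and spill costs printed by
   determine_set_costs start to dominate.  */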
5889
5890 /* For each size of the induction variable set determine the penalty. */
5891
5892 static void
5893 determine_set_costs (struct ivopts_data *data)
5894 {
5895 unsigned j, n;
5896 gphi *phi;
5897 gphi_iterator psi;
5898 tree op;
5899 struct loop *loop = data->current_loop;
5900 bitmap_iterator bi;
5901
5902 if (dump_file && (dump_flags & TDF_DETAILS))
5903 {
5904 fprintf (dump_file, "Global costs:\n");
5905 fprintf (dump_file, " target_avail_regs %d\n", target_avail_regs);
5906 fprintf (dump_file, " target_clobbered_regs %d\n", target_clobbered_regs);
5907 fprintf (dump_file, " target_reg_cost %d\n", target_reg_cost[data->speed]);
5908 fprintf (dump_file, " target_spill_cost %d\n", target_spill_cost[data->speed]);
5909 }
5910
5911 n = 0;
5912 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
5913 {
5914 phi = psi.phi ();
5915 op = PHI_RESULT (phi);
5916
5917 if (virtual_operand_p (op))
5918 continue;
5919
5920 if (get_iv (data, op))
5921 continue;
5922
5923 n++;
5924 }
5925
5926 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, j, bi)
5927 {
5928 struct version_info *info = ver_info (data, j);
5929
5930 if (info->inv_id && info->has_nonlin_use)
5931 n++;
5932 }
5933
5934 data->regs_used = n;
5935 if (dump_file && (dump_flags & TDF_DETAILS))
5936 fprintf (dump_file, " regs_used %d\n", n);
5937
5938 if (dump_file && (dump_flags & TDF_DETAILS))
5939 {
5940 fprintf (dump_file, " cost for size:\n");
5941 fprintf (dump_file, " ivs\tcost\n");
5942 for (j = 0; j <= 2 * target_avail_regs; j++)
5943 fprintf (dump_file, " %d\t%d\n", j,
5944 ivopts_global_cost_for_size (data, j));
5945 fprintf (dump_file, "\n");
5946 }
5947 }
5948
5949 /* Returns true if A is a cheaper cost pair than B. */
5950
5951 static bool
5952 cheaper_cost_pair (struct cost_pair *a, struct cost_pair *b)
5953 {
5954 int cmp;
5955
5956 if (!a)
5957 return false;
5958
5959 if (!b)
5960 return true;
5961
5962 cmp = compare_costs (a->cost, b->cost);
5963 if (cmp < 0)
5964 return true;
5965
5966 if (cmp > 0)
5967 return false;
5968
5969 /* In case the costs are the same, prefer the cheaper candidate. */
5970 if (a->cand->cost < b->cand->cost)
5971 return true;
5972
5973 return false;
5974 }
5975
5976
5977 /* Returns the candidate by which USE is expressed in IVS. */
5978
5979 static struct cost_pair *
5980 iv_ca_cand_for_use (struct iv_ca *ivs, struct iv_use *use)
5981 {
5982 return ivs->cand_for_use[use->id];
5983 }
5984
5985 /* Computes the cost field of IVS structure. */
5986
5987 static void
5988 iv_ca_recount_cost (struct ivopts_data *data, struct iv_ca *ivs)
5989 {
5990 comp_cost cost = ivs->cand_use_cost;
5991
5992 cost.cost += ivs->cand_cost;
5993
5994 cost.cost += ivopts_global_cost_for_size (data,
5995 ivs->n_regs + ivs->num_used_inv_expr);
5996
5997 ivs->cost = cost;
5998 }
5999
6000 /* Remove invariants in set INVS from set IVS. */
6001
6002 static void
6003 iv_ca_set_remove_invariants (struct iv_ca *ivs, bitmap invs)
6004 {
6005 bitmap_iterator bi;
6006 unsigned iid;
6007
6008 if (!invs)
6009 return;
6010
6011 EXECUTE_IF_SET_IN_BITMAP (invs, 0, iid, bi)
6012 {
6013 ivs->n_invariant_uses[iid]--;
6014 if (ivs->n_invariant_uses[iid] == 0)
6015 ivs->n_regs--;
6016 }
6017 }
6018
6019 /* Set USE not to be expressed by any candidate in IVS. */
6020
6021 static void
6022 iv_ca_set_no_cp (struct ivopts_data *data, struct iv_ca *ivs,
6023 struct iv_use *use)
6024 {
6025 unsigned uid = use->id, cid;
6026 struct cost_pair *cp;
6027
6028 cp = ivs->cand_for_use[uid];
6029 if (!cp)
6030 return;
6031 cid = cp->cand->id;
6032
6033 ivs->bad_uses++;
6034 ivs->cand_for_use[uid] = NULL;
6035 ivs->n_cand_uses[cid]--;
6036
6037 if (ivs->n_cand_uses[cid] == 0)
6038 {
6039 bitmap_clear_bit (ivs->cands, cid);
6040 /* Do not count the pseudocandidates. */
6041 if (cp->cand->iv)
6042 ivs->n_regs--;
6043 ivs->n_cands--;
6044 ivs->cand_cost -= cp->cand->cost;
6045
6046 iv_ca_set_remove_invariants (ivs, cp->cand->depends_on);
6047 }
6048
6049 ivs->cand_use_cost = sub_costs (ivs->cand_use_cost, cp->cost);
6050
6051 iv_ca_set_remove_invariants (ivs, cp->depends_on);
6052
6053 if (cp->inv_expr_id != -1)
6054 {
6055 ivs->used_inv_expr[cp->inv_expr_id]--;
6056 if (ivs->used_inv_expr[cp->inv_expr_id] == 0)
6057 ivs->num_used_inv_expr--;
6058 }
6059 iv_ca_recount_cost (data, ivs);
6060 }
6061
6062 /* Add invariants in set INVS to set IVS. */
6063
6064 static void
6065 iv_ca_set_add_invariants (struct iv_ca *ivs, bitmap invs)
6066 {
6067 bitmap_iterator bi;
6068 unsigned iid;
6069
6070 if (!invs)
6071 return;
6072
6073 EXECUTE_IF_SET_IN_BITMAP (invs, 0, iid, bi)
6074 {
6075 ivs->n_invariant_uses[iid]++;
6076 if (ivs->n_invariant_uses[iid] == 1)
6077 ivs->n_regs++;
6078 }
6079 }
6080
6081 /* Set cost pair for USE in set IVS to CP. */
6082
6083 static void
6084 iv_ca_set_cp (struct ivopts_data *data, struct iv_ca *ivs,
6085 struct iv_use *use, struct cost_pair *cp)
6086 {
6087 unsigned uid = use->id, cid;
6088
6089 if (ivs->cand_for_use[uid] == cp)
6090 return;
6091
6092 if (ivs->cand_for_use[uid])
6093 iv_ca_set_no_cp (data, ivs, use);
6094
6095 if (cp)
6096 {
6097 cid = cp->cand->id;
6098
6099 ivs->bad_uses--;
6100 ivs->cand_for_use[uid] = cp;
6101 ivs->n_cand_uses[cid]++;
6102 if (ivs->n_cand_uses[cid] == 1)
6103 {
6104 bitmap_set_bit (ivs->cands, cid);
6105 /* Do not count the pseudocandidates. */
6106 if (cp->cand->iv)
6107 ivs->n_regs++;
6108 ivs->n_cands++;
6109 ivs->cand_cost += cp->cand->cost;
6110
6111 iv_ca_set_add_invariants (ivs, cp->cand->depends_on);
6112 }
6113
6114 ivs->cand_use_cost = add_costs (ivs->cand_use_cost, cp->cost);
6115 iv_ca_set_add_invariants (ivs, cp->depends_on);
6116
6117 if (cp->inv_expr_id != -1)
6118 {
6119 ivs->used_inv_expr[cp->inv_expr_id]++;
6120 if (ivs->used_inv_expr[cp->inv_expr_id] == 1)
6121 ivs->num_used_inv_expr++;
6122 }
6123 iv_ca_recount_cost (data, ivs);
6124 }
6125 }
6126
6127 /* Extend set IVS by expressing USE by some of the candidates in it
6128 if possible. Consider all important candidates if candidates in
6129 set IVS don't give any result. */
6130
6131 static void
6132 iv_ca_add_use (struct ivopts_data *data, struct iv_ca *ivs,
6133 struct iv_use *use)
6134 {
6135 struct cost_pair *best_cp = NULL, *cp;
6136 bitmap_iterator bi;
6137 unsigned i;
6138 struct iv_cand *cand;
6139
6140 gcc_assert (ivs->upto >= use->id);
6141 ivs->upto++;
6142 ivs->bad_uses++;
6143
6144 EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
6145 {
6146 cand = iv_cand (data, i);
6147 cp = get_use_iv_cost (data, use, cand);
6148 if (cheaper_cost_pair (cp, best_cp))
6149 best_cp = cp;
6150 }
6151
6152 if (best_cp == NULL)
6153 {
6154 EXECUTE_IF_SET_IN_BITMAP (data->important_candidates, 0, i, bi)
6155 {
6156 cand = iv_cand (data, i);
6157 cp = get_use_iv_cost (data, use, cand);
6158 if (cheaper_cost_pair (cp, best_cp))
6159 best_cp = cp;
6160 }
6161 }
6162
6163 iv_ca_set_cp (data, ivs, use, best_cp);
6164 }
6165
6166 /* Get cost for assignment IVS. */
6167
6168 static comp_cost
6169 iv_ca_cost (struct iv_ca *ivs)
6170 {
6171 /* This was a conditional expression but it triggered a bug in
6172 Sun C 5.5. */
6173 if (ivs->bad_uses)
6174 return infinite_cost;
6175 else
6176 return ivs->cost;
6177 }
6178
6179 /* Returns true if all dependences of CP are among invariants in IVS. */
6180
6181 static bool
6182 iv_ca_has_deps (struct iv_ca *ivs, struct cost_pair *cp)
6183 {
6184 unsigned i;
6185 bitmap_iterator bi;
6186
6187 if (!cp->depends_on)
6188 return true;
6189
6190 EXECUTE_IF_SET_IN_BITMAP (cp->depends_on, 0, i, bi)
6191 {
6192 if (ivs->n_invariant_uses[i] == 0)
6193 return false;
6194 }
6195
6196 return true;
6197 }
6198
6199 /* Creates a change record for expressing USE by NEW_CP instead of OLD_CP
6200 and chains it before NEXT_CHANGE. */
6201
6202 static struct iv_ca_delta *
6203 iv_ca_delta_add (struct iv_use *use, struct cost_pair *old_cp,
6204 struct cost_pair *new_cp, struct iv_ca_delta *next_change)
6205 {
6206 struct iv_ca_delta *change = XNEW (struct iv_ca_delta);
6207
6208 change->use = use;
6209 change->old_cp = old_cp;
6210 change->new_cp = new_cp;
6211 change->next_change = next_change;
6212
6213 return change;
6214 }
6215
6216 /* Joins two lists of changes L1 and L2. Destructive -- old lists
6217 are rewritten. */
6218
6219 static struct iv_ca_delta *
6220 iv_ca_delta_join (struct iv_ca_delta *l1, struct iv_ca_delta *l2)
6221 {
6222 struct iv_ca_delta *last;
6223
6224 if (!l2)
6225 return l1;
6226
6227 if (!l1)
6228 return l2;
6229
6230 for (last = l1; last->next_change; last = last->next_change)
6231 continue;
6232 last->next_change = l2;
6233
6234 return l1;
6235 }
6236
6237 /* Reverse the list of changes DELTA, forming the inverse to it. */
6238
6239 static struct iv_ca_delta *
6240 iv_ca_delta_reverse (struct iv_ca_delta *delta)
6241 {
6242 struct iv_ca_delta *act, *next, *prev = NULL;
6243
6244 for (act = delta; act; act = next)
6245 {
6246 next = act->next_change;
6247 act->next_change = prev;
6248 prev = act;
6249
6250 std::swap (act->old_cp, act->new_cp);
6251 }
6252
6253 return prev;
6254 }
6255
6256 /* Commit changes in DELTA to IVS. If FORWARD is false, the changes are
6257 reverted instead. */
6258
6259 static void
6260 iv_ca_delta_commit (struct ivopts_data *data, struct iv_ca *ivs,
6261 struct iv_ca_delta *delta, bool forward)
6262 {
6263 struct cost_pair *from, *to;
6264 struct iv_ca_delta *act;
6265
6266 if (!forward)
6267 delta = iv_ca_delta_reverse (delta);
6268
6269 for (act = delta; act; act = act->next_change)
6270 {
6271 from = act->old_cp;
6272 to = act->new_cp;
6273 gcc_assert (iv_ca_cand_for_use (ivs, act->use) == from);
6274 iv_ca_set_cp (data, ivs, act->use, to);
6275 }
6276
6277 if (!forward)
6278 iv_ca_delta_reverse (delta);
6279 }
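
/* Illustrative sketch of how the delta machinery above is typically used
   (iv_ca_extend and iv_ca_narrow below follow this pattern):

     struct iv_ca_delta *delta = NULL;
     delta = iv_ca_delta_add (use, old_cp, new_cp, delta);

     iv_ca_delta_commit (data, ivs, delta, true);   // apply tentatively
     comp_cost cost = iv_ca_cost (ivs);             // measure the result
     iv_ca_delta_commit (data, ivs, delta, false);  // roll everything back

   Only when the measured cost is an improvement does the caller commit
   the delta for good; in either case the list is eventually released
   with iv_ca_delta_free.  */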
6280
6281 /* Returns true if CAND is used in IVS. */
6282
6283 static bool
6284 iv_ca_cand_used_p (struct iv_ca *ivs, struct iv_cand *cand)
6285 {
6286 return ivs->n_cand_uses[cand->id] > 0;
6287 }
6288
6289 /* Returns number of induction variable candidates in the set IVS. */
6290
6291 static unsigned
6292 iv_ca_n_cands (struct iv_ca *ivs)
6293 {
6294 return ivs->n_cands;
6295 }
6296
6297 /* Free the list of changes DELTA. */
6298
6299 static void
6300 iv_ca_delta_free (struct iv_ca_delta **delta)
6301 {
6302 struct iv_ca_delta *act, *next;
6303
6304 for (act = *delta; act; act = next)
6305 {
6306 next = act->next_change;
6307 free (act);
6308 }
6309
6310 *delta = NULL;
6311 }
6312
6313 /* Allocates new iv candidates assignment. */
6314
6315 static struct iv_ca *
6316 iv_ca_new (struct ivopts_data *data)
6317 {
6318 struct iv_ca *nw = XNEW (struct iv_ca);
6319
6320 nw->upto = 0;
6321 nw->bad_uses = 0;
6322 nw->cand_for_use = XCNEWVEC (struct cost_pair *, n_iv_uses (data));
6323 nw->n_cand_uses = XCNEWVEC (unsigned, n_iv_cands (data));
6324 nw->cands = BITMAP_ALLOC (NULL);
6325 nw->n_cands = 0;
6326 nw->n_regs = 0;
6327 nw->cand_use_cost = no_cost;
6328 nw->cand_cost = 0;
6329 nw->n_invariant_uses = XCNEWVEC (unsigned, data->max_inv_id + 1);
6330 nw->cost = no_cost;
6331 nw->used_inv_expr = XCNEWVEC (unsigned, data->inv_expr_id + 1);
6332 nw->num_used_inv_expr = 0;
6333
6334 return nw;
6335 }
6336
6337 /* Free memory occupied by the set IVS. */
6338
6339 static void
6340 iv_ca_free (struct iv_ca **ivs)
6341 {
6342 free ((*ivs)->cand_for_use);
6343 free ((*ivs)->n_cand_uses);
6344 BITMAP_FREE ((*ivs)->cands);
6345 free ((*ivs)->n_invariant_uses);
6346 free ((*ivs)->used_inv_expr);
6347 free (*ivs);
6348 *ivs = NULL;
6349 }
6350
6351 /* Dumps IVS to FILE. */
6352
6353 static void
6354 iv_ca_dump (struct ivopts_data *data, FILE *file, struct iv_ca *ivs)
6355 {
6356 const char *pref = " invariants ";
6357 unsigned i;
6358 comp_cost cost = iv_ca_cost (ivs);
6359
6360 fprintf (file, " cost: %d (complexity %d)\n", cost.cost, cost.complexity);
6361 fprintf (file, " cand_cost: %d\n cand_use_cost: %d (complexity %d)\n",
6362 ivs->cand_cost, ivs->cand_use_cost.cost, ivs->cand_use_cost.complexity);
6363 bitmap_print (file, ivs->cands, " candidates: ","\n");
6364
6365 for (i = 0; i < ivs->upto; i++)
6366 {
6367 struct iv_use *use = iv_use (data, i);
6368 struct cost_pair *cp = iv_ca_cand_for_use (ivs, use);
6369 if (cp)
6370 fprintf (file, " use:%d --> iv_cand:%d, cost=(%d,%d)\n",
6371 use->id, cp->cand->id, cp->cost.cost, cp->cost.complexity);
6372 else
6373 fprintf (file, " use:%d --> ??\n", use->id);
6374 }
6375
6376 for (i = 1; i <= data->max_inv_id; i++)
6377 if (ivs->n_invariant_uses[i])
6378 {
6379 fprintf (file, "%s%d", pref, i);
6380 pref = ", ";
6381 }
6382 fprintf (file, "\n\n");
6383 }
6384
6385 /* Try changing the candidate expressing each use in IVS to CAND. Return the
6386 cost of the new set, and store the differences in DELTA. The number of
6387 induction variables in the new set is stored to N_IVS. MIN_NCAND is a flag;
6388 when it is true, the function tries to find a solution with minimal iv candidates. */
6389
6390 static comp_cost
6391 iv_ca_extend (struct ivopts_data *data, struct iv_ca *ivs,
6392 struct iv_cand *cand, struct iv_ca_delta **delta,
6393 unsigned *n_ivs, bool min_ncand)
6394 {
6395 unsigned i;
6396 comp_cost cost;
6397 struct iv_use *use;
6398 struct cost_pair *old_cp, *new_cp;
6399
6400 *delta = NULL;
6401 for (i = 0; i < ivs->upto; i++)
6402 {
6403 use = iv_use (data, i);
6404 old_cp = iv_ca_cand_for_use (ivs, use);
6405
6406 if (old_cp
6407 && old_cp->cand == cand)
6408 continue;
6409
6410 new_cp = get_use_iv_cost (data, use, cand);
6411 if (!new_cp)
6412 continue;
6413
6414 if (!min_ncand && !iv_ca_has_deps (ivs, new_cp))
6415 continue;
6416
6417 if (!min_ncand && !cheaper_cost_pair (new_cp, old_cp))
6418 continue;
6419
6420 *delta = iv_ca_delta_add (use, old_cp, new_cp, *delta);
6421 }
6422
6423 iv_ca_delta_commit (data, ivs, *delta, true);
6424 cost = iv_ca_cost (ivs);
6425 if (n_ivs)
6426 *n_ivs = iv_ca_n_cands (ivs);
6427 iv_ca_delta_commit (data, ivs, *delta, false);
6428
6429 return cost;
6430 }
6431
6432 /* Try narrowing set IVS by removing CAND. Return the cost of
6433 the new set and store the differences in DELTA. START is
6434 the candidate with which we start narrowing. */
6435
6436 static comp_cost
6437 iv_ca_narrow (struct ivopts_data *data, struct iv_ca *ivs,
6438 struct iv_cand *cand, struct iv_cand *start,
6439 struct iv_ca_delta **delta)
6440 {
6441 unsigned i, ci;
6442 struct iv_use *use;
6443 struct cost_pair *old_cp, *new_cp, *cp;
6444 bitmap_iterator bi;
6445 struct iv_cand *cnd;
6446 comp_cost cost, best_cost, acost;
6447
6448 *delta = NULL;
6449 for (i = 0; i < n_iv_uses (data); i++)
6450 {
6451 use = iv_use (data, i);
6452
6453 old_cp = iv_ca_cand_for_use (ivs, use);
6454 if (old_cp->cand != cand)
6455 continue;
6456
6457 best_cost = iv_ca_cost (ivs);
6458 /* Start narrowing with START. */
6459 new_cp = get_use_iv_cost (data, use, start);
6460
6461 if (data->consider_all_candidates)
6462 {
6463 EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, ci, bi)
6464 {
6465 if (ci == cand->id || (start && ci == start->id))
6466 continue;
6467
6468 cnd = iv_cand (data, ci);
6469
6470 cp = get_use_iv_cost (data, use, cnd);
6471 if (!cp)
6472 continue;
6473
6474 iv_ca_set_cp (data, ivs, use, cp);
6475 acost = iv_ca_cost (ivs);
6476
6477 if (compare_costs (acost, best_cost) < 0)
6478 {
6479 best_cost = acost;
6480 new_cp = cp;
6481 }
6482 }
6483 }
6484 else
6485 {
6486 EXECUTE_IF_AND_IN_BITMAP (use->related_cands, ivs->cands, 0, ci, bi)
6487 {
6488 if (ci == cand->id || (start && ci == start->id))
6489 continue;
6490
6491 cnd = iv_cand (data, ci);
6492
6493 cp = get_use_iv_cost (data, use, cnd);
6494 if (!cp)
6495 continue;
6496
6497 iv_ca_set_cp (data, ivs, use, cp);
6498 acost = iv_ca_cost (ivs);
6499
6500 if (compare_costs (acost, best_cost) < 0)
6501 {
6502 best_cost = acost;
6503 new_cp = cp;
6504 }
6505 }
6506 }
6507 /* Restore to old cp for use. */
6508 iv_ca_set_cp (data, ivs, use, old_cp);
6509
6510 if (!new_cp)
6511 {
6512 iv_ca_delta_free (delta);
6513 return infinite_cost;
6514 }
6515
6516 *delta = iv_ca_delta_add (use, old_cp, new_cp, *delta);
6517 }
6518
6519 iv_ca_delta_commit (data, ivs, *delta, true);
6520 cost = iv_ca_cost (ivs);
6521 iv_ca_delta_commit (data, ivs, *delta, false);
6522
6523 return cost;
6524 }
6525
6526 /* Try optimizing the set of candidates IVS by removing candidates other
6527 than EXCEPT_CAND from it. Return the cost of the new set, and store the
6528 differences in DELTA. */
6529
6530 static comp_cost
6531 iv_ca_prune (struct ivopts_data *data, struct iv_ca *ivs,
6532 struct iv_cand *except_cand, struct iv_ca_delta **delta)
6533 {
6534 bitmap_iterator bi;
6535 struct iv_ca_delta *act_delta, *best_delta;
6536 unsigned i;
6537 comp_cost best_cost, acost;
6538 struct iv_cand *cand;
6539
6540 best_delta = NULL;
6541 best_cost = iv_ca_cost (ivs);
6542
6543 EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
6544 {
6545 cand = iv_cand (data, i);
6546
6547 if (cand == except_cand)
6548 continue;
6549
6550 acost = iv_ca_narrow (data, ivs, cand, except_cand, &act_delta);
6551
6552 if (compare_costs (acost, best_cost) < 0)
6553 {
6554 best_cost = acost;
6555 iv_ca_delta_free (&best_delta);
6556 best_delta = act_delta;
6557 }
6558 else
6559 iv_ca_delta_free (&act_delta);
6560 }
6561
6562 if (!best_delta)
6563 {
6564 *delta = NULL;
6565 return best_cost;
6566 }
6567
6568 /* Recurse to possibly remove other unnecessary ivs. */
6569 iv_ca_delta_commit (data, ivs, best_delta, true);
6570 best_cost = iv_ca_prune (data, ivs, except_cand, delta);
6571 iv_ca_delta_commit (data, ivs, best_delta, false);
6572 *delta = iv_ca_delta_join (best_delta, *delta);
6573 return best_cost;
6574 }
6575
6576 /* Check if CAND_IDX is a candidate other than OLD_CAND with a cheaper
6577 local cost for USE than BEST_CP. If so, return a pointer to the
6578 corresponding cost_pair; otherwise just return BEST_CP. */
6579
6580 static struct cost_pair*
6581 cheaper_cost_with_cand (struct ivopts_data *data, struct iv_use *use,
6582 unsigned int cand_idx, struct iv_cand *old_cand,
6583 struct cost_pair *best_cp)
6584 {
6585 struct iv_cand *cand;
6586 struct cost_pair *cp;
6587
6588 gcc_assert (old_cand != NULL && best_cp != NULL);
6589 if (cand_idx == old_cand->id)
6590 return best_cp;
6591
6592 cand = iv_cand (data, cand_idx);
6593 cp = get_use_iv_cost (data, use, cand);
6594 if (cp != NULL && cheaper_cost_pair (cp, best_cp))
6595 return cp;
6596
6597 return best_cp;
6598 }
6599
6600 /* Try breaking the locally optimal fixed-point for IVS by replacing candidates
6601 that are used by more than one iv use. For each such candidate, this
6602 function tries to represent the iv uses under that candidate using other
6603 candidates with lower local cost, then tries to prune the new set. If the
6604 new set has a lower cost, it returns the new cost after recording the
6605 candidate replacement in list DELTA. */
6606
6607 static comp_cost
6608 iv_ca_replace (struct ivopts_data *data, struct iv_ca *ivs,
6609 struct iv_ca_delta **delta)
6610 {
6611 bitmap_iterator bi, bj;
6612 unsigned int i, j, k;
6613 struct iv_use *use;
6614 struct iv_cand *cand;
6615 comp_cost orig_cost, acost;
6616 struct iv_ca_delta *act_delta, *tmp_delta;
6617 struct cost_pair *old_cp, *best_cp = NULL;
6618
6619 *delta = NULL;
6620 orig_cost = iv_ca_cost (ivs);
6621
6622 EXECUTE_IF_SET_IN_BITMAP (ivs->cands, 0, i, bi)
6623 {
6624 if (ivs->n_cand_uses[i] == 1
6625 || ivs->n_cand_uses[i] > ALWAYS_PRUNE_CAND_SET_BOUND)
6626 continue;
6627
6628 cand = iv_cand (data, i);
6629
6630 act_delta = NULL;
6631 /* Represent uses under current candidate using other ones with
6632 lower local cost. */
6633 for (j = 0; j < ivs->upto; j++)
6634 {
6635 use = iv_use (data, j);
6636 old_cp = iv_ca_cand_for_use (ivs, use);
6637
6638 if (old_cp->cand != cand)
6639 continue;
6640
6641 best_cp = old_cp;
6642 if (data->consider_all_candidates)
6643 for (k = 0; k < n_iv_cands (data); k++)
6644 best_cp = cheaper_cost_with_cand (data, use, k,
6645 old_cp->cand, best_cp);
6646 else
6647 EXECUTE_IF_SET_IN_BITMAP (use->related_cands, 0, k, bj)
6648 best_cp = cheaper_cost_with_cand (data, use, k,
6649 old_cp->cand, best_cp);
6650
6651 if (best_cp == old_cp)
6652 continue;
6653
6654 act_delta = iv_ca_delta_add (use, old_cp, best_cp, act_delta);
6655 }
6656 /* No need for further prune. */
6657 if (!act_delta)
6658 continue;
6659
6660 /* Prune the new candidate set. */
6661 iv_ca_delta_commit (data, ivs, act_delta, true);
6662 acost = iv_ca_prune (data, ivs, NULL, &tmp_delta);
6663 iv_ca_delta_commit (data, ivs, act_delta, false);
6664 act_delta = iv_ca_delta_join (act_delta, tmp_delta);
6665
6666 if (compare_costs (acost, orig_cost) < 0)
6667 {
6668 *delta = act_delta;
6669 return acost;
6670 }
6671 else
6672 iv_ca_delta_free (&act_delta);
6673 }
6674
6675 return orig_cost;
6676 }
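
/* Illustrative scenario for iv_ca_replace (hypothetical): suppose uses u0
   and u1 are both expressed by candidate c0, while u0 would be locally
   cheaper on c1 and u1 on c2.  Moving a single use away from c0 looks
   like a loss, because c0 must be kept for the other use; but after the
   replacement and the subsequent pruning the set {c1, c2} may turn out
   cheaper overall than {c0}, which the one-candidate-at-a-time steps in
   try_improve_iv_set may never reach on their own.  */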
6677
6678 /* Tries to extend the set IVS in the best possible way in order
6679 to express USE. If ORIGINALP is true, prefer candidates from
6680 the original set of IVs; otherwise favor important candidates not
6681 based on any memory object. */
6682
6683 static bool
6684 try_add_cand_for (struct ivopts_data *data, struct iv_ca *ivs,
6685 struct iv_use *use, bool originalp)
6686 {
6687 comp_cost best_cost, act_cost;
6688 unsigned i;
6689 bitmap_iterator bi;
6690 struct iv_cand *cand;
6691 struct iv_ca_delta *best_delta = NULL, *act_delta;
6692 struct cost_pair *cp;
6693
6694 iv_ca_add_use (data, ivs, use);
6695 best_cost = iv_ca_cost (ivs);
6696 cp = iv_ca_cand_for_use (ivs, use);
6697 if (cp)
6698 {
6699 best_delta = iv_ca_delta_add (use, NULL, cp, NULL);
6700 iv_ca_set_no_cp (data, ivs, use);
6701 }
6702
6703 /* If ORIGINALP is true, try to find the original IV for the use. Otherwise
6704 first try important candidates not based on any memory object. Only if
6705 this fails do we try the use-specific ones. Rationale -- in loops with many
6706 variables the best choice often is to use just one generic biv. If we
6707 added here many ivs specific to the uses, the optimization algorithm later
6708 would be likely to get stuck in a local minimum, thus causing us to create
6709 too many ivs. The approach from few ivs to more seems more likely to be
6710 successful -- starting from few ivs, replacing an expensive use by a
6711 specific iv should always be a win. */
6712 EXECUTE_IF_SET_IN_BITMAP (use->related_cands, 0, i, bi)
6713 {
6714 cand = iv_cand (data, i);
6715
6716 if (originalp && cand->pos != IP_ORIGINAL)
6717 continue;
6718
6719 if (!originalp && cand->iv->base_object != NULL_TREE)
6720 continue;
6721
6722 if (iv_ca_cand_used_p (ivs, cand))
6723 continue;
6724
6725 cp = get_use_iv_cost (data, use, cand);
6726 if (!cp)
6727 continue;
6728
6729 iv_ca_set_cp (data, ivs, use, cp);
6730 act_cost = iv_ca_extend (data, ivs, cand, &act_delta, NULL,
6731 true);
6732 iv_ca_set_no_cp (data, ivs, use);
6733 act_delta = iv_ca_delta_add (use, NULL, cp, act_delta);
6734
6735 if (compare_costs (act_cost, best_cost) < 0)
6736 {
6737 best_cost = act_cost;
6738
6739 iv_ca_delta_free (&best_delta);
6740 best_delta = act_delta;
6741 }
6742 else
6743 iv_ca_delta_free (&act_delta);
6744 }
6745
6746 if (infinite_cost_p (best_cost))
6747 {
6748 for (i = 0; i < use->n_map_members; i++)
6749 {
6750 cp = use->cost_map + i;
6751 cand = cp->cand;
6752 if (!cand)
6753 continue;
6754
6755 /* Already tried this. */
6756 if (cand->important)
6757 {
6758 if (originalp && cand->pos == IP_ORIGINAL)
6759 continue;
6760 if (!originalp && cand->iv->base_object == NULL_TREE)
6761 continue;
6762 }
6763
6764 if (iv_ca_cand_used_p (ivs, cand))
6765 continue;
6766
6767 act_delta = NULL;
6768 iv_ca_set_cp (data, ivs, use, cp);
6769 act_cost = iv_ca_extend (data, ivs, cand, &act_delta, NULL, true);
6770 iv_ca_set_no_cp (data, ivs, use);
6771 act_delta = iv_ca_delta_add (use, iv_ca_cand_for_use (ivs, use),
6772 cp, act_delta);
6773
6774 if (compare_costs (act_cost, best_cost) < 0)
6775 {
6776 best_cost = act_cost;
6777
6778 if (best_delta)
6779 iv_ca_delta_free (&best_delta);
6780 best_delta = act_delta;
6781 }
6782 else
6783 iv_ca_delta_free (&act_delta);
6784 }
6785 }
6786
6787 iv_ca_delta_commit (data, ivs, best_delta, true);
6788 iv_ca_delta_free (&best_delta);
6789
6790 return !infinite_cost_p (best_cost);
6791 }
6792
6793 /* Finds an initial assignment of candidates to uses. */
6794
6795 static struct iv_ca *
6796 get_initial_solution (struct ivopts_data *data, bool originalp)
6797 {
6798 struct iv_ca *ivs = iv_ca_new (data);
6799 unsigned i;
6800
6801 for (i = 0; i < n_iv_uses (data); i++)
6802 if (!try_add_cand_for (data, ivs, iv_use (data, i), originalp))
6803 {
6804 iv_ca_free (&ivs);
6805 return NULL;
6806 }
6807
6808 return ivs;
6809 }
6810
6811 /* Tries to improve set of induction variables IVS. TRY_REPLACE_P
6812 points to a bool variable, this function tries to break local
6813 optimal fixed-point by replacing candidates in IVS if it's true. */
6814
6815 static bool
6816 try_improve_iv_set (struct ivopts_data *data,
6817 struct iv_ca *ivs, bool *try_replace_p)
6818 {
6819 unsigned i, n_ivs;
6820 comp_cost acost, best_cost = iv_ca_cost (ivs);
6821 struct iv_ca_delta *best_delta = NULL, *act_delta, *tmp_delta;
6822 struct iv_cand *cand;
6823
6824 /* Try extending the set of induction variables by one. */
6825 for (i = 0; i < n_iv_cands (data); i++)
6826 {
6827 cand = iv_cand (data, i);
6828
6829 if (iv_ca_cand_used_p (ivs, cand))
6830 continue;
6831
6832 acost = iv_ca_extend (data, ivs, cand, &act_delta, &n_ivs, false);
6833 if (!act_delta)
6834 continue;
6835
6836 /* If we successfully added the candidate and the set is small enough,
6837 try optimizing it by removing other candidates. */
6838 if (n_ivs <= ALWAYS_PRUNE_CAND_SET_BOUND)
6839 {
6840 iv_ca_delta_commit (data, ivs, act_delta, true);
6841 acost = iv_ca_prune (data, ivs, cand, &tmp_delta);
6842 iv_ca_delta_commit (data, ivs, act_delta, false);
6843 act_delta = iv_ca_delta_join (act_delta, tmp_delta);
6844 }
6845
6846 if (compare_costs (acost, best_cost) < 0)
6847 {
6848 best_cost = acost;
6849 iv_ca_delta_free (&best_delta);
6850 best_delta = act_delta;
6851 }
6852 else
6853 iv_ca_delta_free (&act_delta);
6854 }
6855
6856 if (!best_delta)
6857 {
6858 /* Try removing the candidates from the set instead. */
6859 best_cost = iv_ca_prune (data, ivs, NULL, &best_delta);
6860
6861 if (!best_delta && *try_replace_p)
6862 {
6863 *try_replace_p = false;
6864 /* So far the candidate-selecting algorithm tends to choose fewer IVs
6865 so that it can handle cases in which loops have many variables
6866 but the best choice is often to use only one general biv. One
6867 weakness is that it can't handle the opposite cases, in which different
6868 candidates should be chosen with respect to each use. To solve
6869 the problem, we replace candidates in the manner described in the
6870 comments of iv_ca_replace, thus giving the general algorithm a chance
6871 to break the locally optimal fixed-point in these cases. */
6872 best_cost = iv_ca_replace (data, ivs, &best_delta);
6873 }
6874
6875 if (!best_delta)
6876 return false;
6877 }
6878
6879 iv_ca_delta_commit (data, ivs, best_delta, true);
6880 gcc_assert (compare_costs (best_cost, iv_ca_cost (ivs)) == 0);
6881 iv_ca_delta_free (&best_delta);
6882 return true;
6883 }
6884
6885 /* Attempts to find the optimal set of induction variables. We use a simple
6886 greedy heuristic -- we try to replace at most one candidate in the selected
6887 solution and remove the unused ivs while this improves the cost. */
6888
6889 static struct iv_ca *
6890 find_optimal_iv_set_1 (struct ivopts_data *data, bool originalp)
6891 {
6892 struct iv_ca *set;
6893 bool try_replace_p = true;
6894
6895 /* Get the initial solution. */
6896 set = get_initial_solution (data, originalp);
6897 if (!set)
6898 {
6899 if (dump_file && (dump_flags & TDF_DETAILS))
6900 fprintf (dump_file, "Unable to substitute for ivs, failed.\n");
6901 return NULL;
6902 }
6903
6904 if (dump_file && (dump_flags & TDF_DETAILS))
6905 {
6906 fprintf (dump_file, "Initial set of candidates:\n");
6907 iv_ca_dump (data, dump_file, set);
6908 }
6909
6910 while (try_improve_iv_set (data, set, &try_replace_p))
6911 {
6912 if (dump_file && (dump_flags & TDF_DETAILS))
6913 {
6914 fprintf (dump_file, "Improved to:\n");
6915 iv_ca_dump (data, dump_file, set);
6916 }
6917 }
6918
6919 return set;
6920 }
6921
6922 static struct iv_ca *
6923 find_optimal_iv_set (struct ivopts_data *data)
6924 {
6925 unsigned i;
6926 struct iv_ca *set, *origset;
6927 struct iv_use *use;
6928 comp_cost cost, origcost;
6929
6930 /* Determine the cost based on a strategy that starts with the original IVs,
6931 then try again using a strategy that prefers candidates not based
6932 on any IVs. */
6933 origset = find_optimal_iv_set_1 (data, true);
6934 set = find_optimal_iv_set_1 (data, false);
6935
6936 if (!origset && !set)
6937 return NULL;
6938
6939 origcost = origset ? iv_ca_cost (origset) : infinite_cost;
6940 cost = set ? iv_ca_cost (set) : infinite_cost;
6941
6942 if (dump_file && (dump_flags & TDF_DETAILS))
6943 {
6944 fprintf (dump_file, "Original cost %d (complexity %d)\n\n",
6945 origcost.cost, origcost.complexity);
6946 fprintf (dump_file, "Final cost %d (complexity %d)\n\n",
6947 cost.cost, cost.complexity);
6948 }
6949
6950 /* Choose the one with the best cost. */
6951 if (compare_costs (origcost, cost) <= 0)
6952 {
6953 if (set)
6954 iv_ca_free (&set);
6955 set = origset;
6956 }
6957 else if (origset)
6958 iv_ca_free (&origset);
6959
6960 for (i = 0; i < n_iv_uses (data); i++)
6961 {
6962 use = iv_use (data, i);
6963 use->selected = iv_ca_cand_for_use (set, use)->cand;
6964 }
6965
6966 return set;
6967 }
6968
6969 /* Creates a new induction variable corresponding to CAND. */
6970
6971 static void
6972 create_new_iv (struct ivopts_data *data, struct iv_cand *cand)
6973 {
6974 gimple_stmt_iterator incr_pos;
6975 tree base;
6976 bool after = false;
6977
6978 if (!cand->iv)
6979 return;
6980
6981 switch (cand->pos)
6982 {
6983 case IP_NORMAL:
6984 incr_pos = gsi_last_bb (ip_normal_pos (data->current_loop));
6985 break;
6986
6987 case IP_END:
6988 incr_pos = gsi_last_bb (ip_end_pos (data->current_loop));
6989 after = true;
6990 break;
6991
6992 case IP_AFTER_USE:
6993 after = true;
6994 /* fall through */
6995 case IP_BEFORE_USE:
6996 incr_pos = gsi_for_stmt (cand->incremented_at);
6997 break;
6998
6999 case IP_ORIGINAL:
7000 /* Mark that the iv is preserved. */
7001 name_info (data, cand->var_before)->preserve_biv = true;
7002 name_info (data, cand->var_after)->preserve_biv = true;
7003
7004 /* Rewrite the increment so that it uses var_before directly. */
7005 find_interesting_uses_op (data, cand->var_after)->selected = cand;
7006 return;
7007 }
7008
7009 gimple_add_tmp_var (cand->var_before);
7010
7011 base = unshare_expr (cand->iv->base);
7012
7013 create_iv (base, unshare_expr (cand->iv->step),
7014 cand->var_before, data->current_loop,
7015 &incr_pos, after, &cand->var_before, &cand->var_after);
7016 }
7017
7018 /* Creates new induction variables described in SET. */
7019
7020 static void
7021 create_new_ivs (struct ivopts_data *data, struct iv_ca *set)
7022 {
7023 unsigned i;
7024 struct iv_cand *cand;
7025 bitmap_iterator bi;
7026
7027 EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
7028 {
7029 cand = iv_cand (data, i);
7030 create_new_iv (data, cand);
7031 }
7032
7033 if (dump_file && (dump_flags & TDF_DETAILS))
7034 {
7035 fprintf (dump_file, "Selected IV set for loop %d",
7036 data->current_loop->num);
7037 if (data->loop_loc != UNKNOWN_LOCATION)
7038 fprintf (dump_file, " at %s:%d", LOCATION_FILE (data->loop_loc),
7039 LOCATION_LINE (data->loop_loc));
7040 fprintf (dump_file, ", %lu IVs:\n", bitmap_count_bits (set->cands));
7041 EXECUTE_IF_SET_IN_BITMAP (set->cands, 0, i, bi)
7042 {
7043 cand = iv_cand (data, i);
7044 dump_cand (dump_file, cand);
7045 }
7046 fprintf (dump_file, "\n");
7047 }
7048 }
7049
7050 /* Rewrites USE (definition of iv used in a nonlinear expression)
7051 using candidate CAND. */
7052
7053 static void
7054 rewrite_use_nonlinear_expr (struct ivopts_data *data,
7055 struct iv_use *use, struct iv_cand *cand)
7056 {
7057 tree comp;
7058 tree op, tgt;
7059 gassign *ass;
7060 gimple_stmt_iterator bsi;
7061
7062 /* An important special case -- if we are asked to express the value of
7063 the original iv by itself, just exit; there is no need to
7064 introduce a new computation (that might also need casting the
7065 variable to unsigned and back). */
7066 if (cand->pos == IP_ORIGINAL
7067 && cand->incremented_at == use->stmt)
7068 {
7069 enum tree_code stmt_code;
7070
7071 gcc_assert (is_gimple_assign (use->stmt));
7072 gcc_assert (gimple_assign_lhs (use->stmt) == cand->var_after);
7073
7074 /* Check whether we may leave the computation unchanged.
7075 This is the case only if it does not rely on other
7076 computations in the loop -- otherwise, the computation
7077 we rely upon may be removed in remove_unused_ivs,
7078 thus leading to ICE. */
7079 stmt_code = gimple_assign_rhs_code (use->stmt);
7080 if (stmt_code == PLUS_EXPR
7081 || stmt_code == MINUS_EXPR
7082 || stmt_code == POINTER_PLUS_EXPR)
7083 {
7084 if (gimple_assign_rhs1 (use->stmt) == cand->var_before)
7085 op = gimple_assign_rhs2 (use->stmt);
7086 else if (gimple_assign_rhs2 (use->stmt) == cand->var_before)
7087 op = gimple_assign_rhs1 (use->stmt);
7088 else
7089 op = NULL_TREE;
7090 }
7091 else
7092 op = NULL_TREE;
7093
7094 if (op && expr_invariant_in_loop_p (data->current_loop, op))
7095 return;
7096 }
7097
7098 comp = get_computation (data->current_loop, use, cand);
7099 gcc_assert (comp != NULL_TREE);
7100
7101 switch (gimple_code (use->stmt))
7102 {
7103 case GIMPLE_PHI:
7104 tgt = PHI_RESULT (use->stmt);
7105
7106 /* If we should keep the biv, do not replace it. */
7107 if (name_info (data, tgt)->preserve_biv)
7108 return;
7109
7110 bsi = gsi_after_labels (gimple_bb (use->stmt));
7111 break;
7112
7113 case GIMPLE_ASSIGN:
7114 tgt = gimple_assign_lhs (use->stmt);
7115 bsi = gsi_for_stmt (use->stmt);
7116 break;
7117
7118 default:
7119 gcc_unreachable ();
7120 }
7121
7122 if (!valid_gimple_rhs_p (comp)
7123 || (gimple_code (use->stmt) != GIMPLE_PHI
7124 /* We can't allow re-allocating the stmt as it might be pointed
7125 to still. */
7126 && (get_gimple_rhs_num_ops (TREE_CODE (comp))
7127 >= gimple_num_ops (gsi_stmt (bsi)))))
7128 {
7129 comp = force_gimple_operand_gsi (&bsi, comp, true, NULL_TREE,
7130 true, GSI_SAME_STMT);
7131 if (POINTER_TYPE_P (TREE_TYPE (tgt)))
7132 {
7133 duplicate_ssa_name_ptr_info (comp, SSA_NAME_PTR_INFO (tgt));
7134 /* As this isn't a plain copy we have to reset alignment
7135 information. */
7136 if (SSA_NAME_PTR_INFO (comp))
7137 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (comp));
7138 }
7139 }
7140
7141 if (gimple_code (use->stmt) == GIMPLE_PHI)
7142 {
7143 ass = gimple_build_assign (tgt, comp);
7144 gsi_insert_before (&bsi, ass, GSI_SAME_STMT);
7145
7146 bsi = gsi_for_stmt (use->stmt);
7147 remove_phi_node (&bsi, false);
7148 }
7149 else
7150 {
7151 gimple_assign_set_rhs_from_tree (&bsi, comp);
7152 use->stmt = gsi_stmt (bsi);
7153 }
7154 }
7155
7156 /* Performs a peephole optimization to reorder the iv update statement with
7157 a mem ref to enable instruction combining in later phases. The mem ref uses
7158 the iv value before the update, so the reordering transformation requires
7159 adjustment of the offset. CAND is the selected IV_CAND.
7160
7161 Example:
7162
7163 t = MEM_REF (base, iv1, 8, 16); // base, index, stride, offset
7164 iv2 = iv1 + 1;
7165
7166 if (t < val) (1)
7167 goto L;
7168 goto Head;
7169
7170
7171 Directly propagating t over to (1) will introduce an overlapping live range
7172 and thus increase register pressure. This peephole transforms it into:
7173
7174
7175 iv2 = iv1 + 1;
7176 t = MEM_REF (base, iv2, 8, 8);
7177 if (t < val)
7178 goto L;
7179 goto Head;
7180 */
7181
7182 static void
7183 adjust_iv_update_pos (struct iv_cand *cand, struct iv_use *use)
7184 {
7185 tree var_after;
7186 gimple *iv_update, *stmt;
7187 basic_block bb;
7188 gimple_stmt_iterator gsi, gsi_iv;
7189
7190 if (cand->pos != IP_NORMAL)
7191 return;
7192
7193 var_after = cand->var_after;
7194 iv_update = SSA_NAME_DEF_STMT (var_after);
7195
7196 bb = gimple_bb (iv_update);
7197 gsi = gsi_last_nondebug_bb (bb);
7198 stmt = gsi_stmt (gsi);
7199
7200 /* Only handle a conditional statement for now. */
7201 if (gimple_code (stmt) != GIMPLE_COND)
7202 return;
7203
7204 gsi_prev_nondebug (&gsi);
7205 stmt = gsi_stmt (gsi);
7206 if (stmt != iv_update)
7207 return;
7208
7209 gsi_prev_nondebug (&gsi);
7210 if (gsi_end_p (gsi))
7211 return;
7212
7213 stmt = gsi_stmt (gsi);
7214 if (gimple_code (stmt) != GIMPLE_ASSIGN)
7215 return;
7216
7217 if (stmt != use->stmt)
7218 return;
7219
7220 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
7221 return;
7222
7223 if (dump_file && (dump_flags & TDF_DETAILS))
7224 {
7225 fprintf (dump_file, "Reordering \n");
7226 print_gimple_stmt (dump_file, iv_update, 0, 0);
7227 print_gimple_stmt (dump_file, use->stmt, 0, 0);
7228 fprintf (dump_file, "\n");
7229 }
7230
7231 gsi = gsi_for_stmt (use->stmt);
7232 gsi_iv = gsi_for_stmt (iv_update);
7233 gsi_move_before (&gsi_iv, &gsi);
7234
7235 cand->pos = IP_BEFORE_USE;
7236 cand->incremented_at = use->stmt;
7237 }
7238
7239 /* Rewrites USE (address that is an iv) using candidate CAND. */
7240
7241 static void
7242 rewrite_use_address_1 (struct ivopts_data *data,
7243 struct iv_use *use, struct iv_cand *cand)
7244 {
7245 aff_tree aff;
7246 gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
7247 tree base_hint = NULL_TREE;
7248 tree ref, iv;
7249 bool ok;
7250
7251 adjust_iv_update_pos (cand, use);
7252 ok = get_computation_aff (data->current_loop, use, cand, use->stmt, &aff);
7253 gcc_assert (ok);
7254 unshare_aff_combination (&aff);
7255
7256 /* To avoid undefined overflow problems, all IV candidates use unsigned
7257 integer types. The drawback is that this makes it impossible for
7258 create_mem_ref to distinguish an IV that is based on a memory object
7259 from one that represents simply an offset.
7260
7261 To work around this problem, we pass a hint to create_mem_ref that
7262 indicates which variable (if any) in aff is an IV based on a memory
7263 object. Note that we only consider the candidate. If this is not
7264 based on an object, the base of the reference is in some subexpression
7265 of the use -- but these will use pointer types, so they are recognized
7266 by the create_mem_ref heuristics anyway. */
7267 if (cand->iv->base_object)
7268 base_hint = var_at_stmt (data->current_loop, cand, use->stmt);
7269
7270 iv = var_at_stmt (data->current_loop, cand, use->stmt);
7271 ref = create_mem_ref (&bsi, TREE_TYPE (*use->op_p), &aff,
7272 reference_alias_ptr_type (*use->op_p),
7273 iv, base_hint, data->speed);
7274 copy_ref_info (ref, *use->op_p);
7275 *use->op_p = ref;
7276 }
7277
7278 /* Rewrites USE (address that is an iv) using candidate CAND. If it's the
7279 first use of a group, rewrites sub uses in the group too. */
7280
7281 static void
7282 rewrite_use_address (struct ivopts_data *data,
7283 struct iv_use *use, struct iv_cand *cand)
7284 {
7285 struct iv_use *next;
7286
7287 gcc_assert (use->sub_id == 0);
7288 rewrite_use_address_1 (data, use, cand);
7289 update_stmt (use->stmt);
7290
7291 for (next = use->next; next != NULL; next = next->next)
7292 {
7293 rewrite_use_address_1 (data, next, cand);
7294 update_stmt (next->stmt);
7295 }
7296
7297 return;
7298 }
7299
7300 /* Rewrites USE (the condition such that one of the arguments is an iv) using
7301 candidate CAND. */
7302
7303 static void
7304 rewrite_use_compare (struct ivopts_data *data,
7305 struct iv_use *use, struct iv_cand *cand)
7306 {
7307 tree comp, *var_p, op, bound;
7308 gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
7309 enum tree_code compare;
7310 struct cost_pair *cp = get_use_iv_cost (data, use, cand);
7311 bool ok;
7312
7313 bound = cp->value;
7314 if (bound)
7315 {
7316 tree var = var_at_stmt (data->current_loop, cand, use->stmt);
7317 tree var_type = TREE_TYPE (var);
7318 gimple_seq stmts;
7319
7320 if (dump_file && (dump_flags & TDF_DETAILS))
7321 {
7322 fprintf (dump_file, "Replacing exit test: ");
7323 print_gimple_stmt (dump_file, use->stmt, 0, TDF_SLIM);
7324 }
7325 compare = cp->comp;
7326 bound = unshare_expr (fold_convert (var_type, bound));
7327 op = force_gimple_operand (bound, &stmts, true, NULL_TREE);
7328 if (stmts)
7329 gsi_insert_seq_on_edge_immediate (
7330 loop_preheader_edge (data->current_loop),
7331 stmts);
7332
7333 gcond *cond_stmt = as_a <gcond *> (use->stmt);
7334 gimple_cond_set_lhs (cond_stmt, var);
7335 gimple_cond_set_code (cond_stmt, compare);
7336 gimple_cond_set_rhs (cond_stmt, op);
7337 return;
7338 }
7339
7340 /* The induction variable elimination failed; just express the original
7341 giv. */
7342 comp = get_computation (data->current_loop, use, cand);
7343 gcc_assert (comp != NULL_TREE);
7344
7345 ok = extract_cond_operands (data, use->stmt, &var_p, NULL, NULL, NULL);
7346 gcc_assert (ok);
7347
7348 *var_p = force_gimple_operand_gsi (&bsi, comp, true, SSA_NAME_VAR (*var_p),
7349 true, GSI_SAME_STMT);
7350 }
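
/* Illustrative example of the replacement above (hypothetical source):

     for (i = 0; i < n; i++)
       a[i] = 0;

   if an address iv p iterating over &a[i] is selected, the exit test
   "i < n" can be rewritten into a comparison of p against a bound that
   is computed once on the preheader edge, e.g.

     if (p != &a[n])

   after which the original counter i may become unused and is cleaned up
   by remove_unused_ivs below.  */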
7351
7352 /* Rewrites USE using candidate CAND. */
7353
7354 static void
7355 rewrite_use (struct ivopts_data *data, struct iv_use *use, struct iv_cand *cand)
7356 {
7357 switch (use->type)
7358 {
7359 case USE_NONLINEAR_EXPR:
7360 rewrite_use_nonlinear_expr (data, use, cand);
7361 break;
7362
7363 case USE_ADDRESS:
7364 rewrite_use_address (data, use, cand);
7365 break;
7366
7367 case USE_COMPARE:
7368 rewrite_use_compare (data, use, cand);
7369 break;
7370
7371 default:
7372 gcc_unreachable ();
7373 }
7374
7375 update_stmt (use->stmt);
7376 }
7377
7378 /* Rewrite the uses using the selected induction variables. */
7379
7380 static void
7381 rewrite_uses (struct ivopts_data *data)
7382 {
7383 unsigned i;
7384 struct iv_cand *cand;
7385 struct iv_use *use;
7386
7387 for (i = 0; i < n_iv_uses (data); i++)
7388 {
7389 use = iv_use (data, i);
7390 cand = use->selected;
7391 gcc_assert (cand);
7392
7393 rewrite_use (data, use, cand);
7394 }
7395 }
7396
7397 /* Removes the ivs that are not used after rewriting. */
7398
7399 static void
7400 remove_unused_ivs (struct ivopts_data *data)
7401 {
7402 unsigned j;
7403 bitmap_iterator bi;
7404 bitmap toremove = BITMAP_ALLOC (NULL);
7405
7406 /* Figure out an order in which to release SSA DEFs so that we don't
7407 release something that we'd have to propagate into a debug stmt
7408 afterwards. */
7409 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, j, bi)
7410 {
7411 struct version_info *info;
7412
7413 info = ver_info (data, j);
7414 if (info->iv
7415 && !integer_zerop (info->iv->step)
7416 && !info->inv_id
7417 && !info->iv->have_use_for
7418 && !info->preserve_biv)
7419 {
7420 bitmap_set_bit (toremove, SSA_NAME_VERSION (info->iv->ssa_name));
7421
7422 tree def = info->iv->ssa_name;
7423
7424 if (MAY_HAVE_DEBUG_STMTS && SSA_NAME_DEF_STMT (def))
7425 {
7426 imm_use_iterator imm_iter;
7427 use_operand_p use_p;
7428 gimple *stmt;
7429 int count = 0;
7430
7431 FOR_EACH_IMM_USE_STMT (stmt, imm_iter, def)
7432 {
7433 if (!gimple_debug_bind_p (stmt))
7434 continue;
7435
7436 /* We just want to determine whether to do nothing
7437 (count == 0), to substitute the computed
7438 expression into a single use of the SSA DEF by
7439 itself (count == 1), or to use a debug temp
7440 because the SSA DEF is used multiple times or as
7441 part of a larger expression (count > 1). */
7442 count++;
7443 if (gimple_debug_bind_get_value (stmt) != def)
7444 count++;
7445
7446 if (count > 1)
7447 BREAK_FROM_IMM_USE_STMT (imm_iter);
7448 }
7449
7450 if (!count)
7451 continue;
7452
7453 struct iv_use dummy_use;
7454 struct iv_cand *best_cand = NULL, *cand;
7455 unsigned i, best_pref = 0, cand_pref;
7456
7457 memset (&dummy_use, 0, sizeof (dummy_use));
7458 dummy_use.iv = info->iv;
7459 for (i = 0; i < n_iv_uses (data) && i < 64; i++)
7460 {
7461 cand = iv_use (data, i)->selected;
7462 if (cand == best_cand)
7463 continue;
7464 cand_pref = operand_equal_p (cand->iv->step,
7465 info->iv->step, 0)
7466 ? 4 : 0;
7467 cand_pref
7468 += TYPE_MODE (TREE_TYPE (cand->iv->base))
7469 == TYPE_MODE (TREE_TYPE (info->iv->base))
7470 ? 2 : 0;
7471 cand_pref
7472 += TREE_CODE (cand->iv->base) == INTEGER_CST
7473 ? 1 : 0;
7474 if (best_cand == NULL || best_pref < cand_pref)
7475 {
7476 best_cand = cand;
7477 best_pref = cand_pref;
7478 }
7479 }
7480
7481 if (!best_cand)
7482 continue;
7483
7484 tree comp = get_computation_at (data->current_loop,
7485 &dummy_use, best_cand,
7486 SSA_NAME_DEF_STMT (def));
7487 if (!comp)
7488 continue;
7489
7490 if (count > 1)
7491 {
7492 tree vexpr = make_node (DEBUG_EXPR_DECL);
7493 DECL_ARTIFICIAL (vexpr) = 1;
7494 TREE_TYPE (vexpr) = TREE_TYPE (comp);
7495 if (SSA_NAME_VAR (def))
7496 DECL_MODE (vexpr) = DECL_MODE (SSA_NAME_VAR (def));
7497 else
7498 DECL_MODE (vexpr) = TYPE_MODE (TREE_TYPE (vexpr));
7499 gdebug *def_temp
7500 = gimple_build_debug_bind (vexpr, comp, NULL);
7501 gimple_stmt_iterator gsi;
7502
7503 if (gimple_code (SSA_NAME_DEF_STMT (def)) == GIMPLE_PHI)
7504 gsi = gsi_after_labels (gimple_bb
7505 (SSA_NAME_DEF_STMT (def)));
7506 else
7507 gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (def));
7508
7509 gsi_insert_before (&gsi, def_temp, GSI_SAME_STMT);
7510 comp = vexpr;
7511 }
7512
7513 FOR_EACH_IMM_USE_STMT (stmt, imm_iter, def)
7514 {
7515 if (!gimple_debug_bind_p (stmt))
7516 continue;
7517
7518 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
7519 SET_USE (use_p, comp);
7520
7521 update_stmt (stmt);
7522 }
7523 }
7524 }
7525 }
7526
7527 release_defs_bitset (toremove);
7528
7529 BITMAP_FREE (toremove);
7530 }
7531
7532 /* Frees memory occupied by struct tree_niter_desc in *VALUE. Callback
7533 for hash_map::traverse. */
7534
7535 bool
7536 free_tree_niter_desc (edge const &, tree_niter_desc *const &value, void *)
7537 {
7538 free (value);
7539 return true;
7540 }
7541
7542 /* Frees data allocated by the optimization of a single loop. */
7543
7544 static void
7545 free_loop_data (struct ivopts_data *data)
7546 {
7547 unsigned i, j;
7548 bitmap_iterator bi;
7549 tree obj;
7550
7551 if (data->niters)
7552 {
7553 data->niters->traverse<void *, free_tree_niter_desc> (NULL);
7554 delete data->niters;
7555 data->niters = NULL;
7556 }
7557
7558 EXECUTE_IF_SET_IN_BITMAP (data->relevant, 0, i, bi)
7559 {
7560 struct version_info *info;
7561
7562 info = ver_info (data, i);
7563 info->iv = NULL;
7564 info->has_nonlin_use = false;
7565 info->preserve_biv = false;
7566 info->inv_id = 0;
7567 }
7568 bitmap_clear (data->relevant);
7569 bitmap_clear (data->important_candidates);
7570
7571 for (i = 0; i < n_iv_uses (data); i++)
7572 {
7573 struct iv_use *use = iv_use (data, i);
7574 struct iv_use *pre = use, *sub = use->next;
7575
7576 while (sub)
7577 {
7578 gcc_assert (sub->related_cands == NULL);
7579 gcc_assert (sub->n_map_members == 0 && sub->cost_map == NULL);
7580
7581 pre = sub;
7582 sub = sub->next;
7583 free (pre);
7584 }
7585
7586 BITMAP_FREE (use->related_cands);
7587 for (j = 0; j < use->n_map_members; j++)
7588 if (use->cost_map[j].depends_on)
7589 BITMAP_FREE (use->cost_map[j].depends_on);
7590 free (use->cost_map);
7591 free (use);
7592 }
7593 data->iv_uses.truncate (0);
7594
7595 for (i = 0; i < n_iv_cands (data); i++)
7596 {
7597 struct iv_cand *cand = iv_cand (data, i);
7598
7599 if (cand->depends_on)
7600 BITMAP_FREE (cand->depends_on);
7601 free (cand);
7602 }
7603 data->iv_candidates.truncate (0);
7604
7605 if (data->version_info_size < num_ssa_names)
7606 {
7607 data->version_info_size = 2 * num_ssa_names;
7608 free (data->version_info);
7609 data->version_info = XCNEWVEC (struct version_info, data->version_info_size);
7610 }
7611
7612 data->max_inv_id = 0;
7613
7614 FOR_EACH_VEC_ELT (decl_rtl_to_reset, i, obj)
7615 SET_DECL_RTL (obj, NULL_RTX);
7616
7617 decl_rtl_to_reset.truncate (0);
7618
7619 data->inv_expr_tab->empty ();
7620 data->inv_expr_id = 0;
7621
7622 data->iv_common_cand_tab->empty ();
7623 data->iv_common_cands.truncate (0);
7624 }
7625
7626 /* Finalizes data structures used by the iv optimization
7627 pass. */
7628
7629 static void
7630 tree_ssa_iv_optimize_finalize (struct ivopts_data *data)
7631 {
7632 free_loop_data (data);
7633 free (data->version_info);
7634 BITMAP_FREE (data->relevant);
7635 BITMAP_FREE (data->important_candidates);
7636
7637 decl_rtl_to_reset.release ();
7638 data->iv_uses.release ();
7639 data->iv_candidates.release ();
7640 delete data->inv_expr_tab;
7641 data->inv_expr_tab = NULL;
7642 free_affine_expand_cache (&data->name_expansion_cache);
7643 delete data->iv_common_cand_tab;
7644 data->iv_common_cand_tab = NULL;
7645 data->iv_common_cands.release ();
7646 obstack_free (&data->iv_obstack, NULL);
7647 }
7648
7649 /* Returns true if the loop body BODY includes any function calls. */
7650
7651 static bool
7652 loop_body_includes_call (basic_block *body, unsigned num_nodes)
7653 {
7654 gimple_stmt_iterator gsi;
7655 unsigned i;
7656
7657 for (i = 0; i < num_nodes; i++)
7658 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
7659 {
7660 gimple *stmt = gsi_stmt (gsi);
7661 if (is_gimple_call (stmt)
7662 && !is_inexpensive_builtin (gimple_call_fndecl (stmt)))
7663 return true;
7664 }
7665 return false;
7666 }
7667
7668 /* Optimizes the LOOP. Returns true if anything changed. */
7669
7670 static bool
7671 tree_ssa_iv_optimize_loop (struct ivopts_data *data, struct loop *loop)
7672 {
7673 bool changed = false;
7674 struct iv_ca *iv_ca;
7675 edge exit = single_dom_exit (loop);
7676 basic_block *body;
7677
7678 gcc_assert (!data->niters);
7679 data->current_loop = loop;
7680 data->loop_loc = find_loop_location (loop);
7681 data->speed = optimize_loop_for_speed_p (loop);
7682
7683 if (dump_file && (dump_flags & TDF_DETAILS))
7684 {
7685 fprintf (dump_file, "Processing loop %d", loop->num);
7686 if (data->loop_loc != UNKNOWN_LOCATION)
7687 fprintf (dump_file, " at %s:%d", LOCATION_FILE (data->loop_loc),
7688 LOCATION_LINE (data->loop_loc));
7689 fprintf (dump_file, "\n");
7690
7691 if (exit)
7692 {
7693 fprintf (dump_file, " single exit %d -> %d, exit condition ",
7694 exit->src->index, exit->dest->index);
7695 print_gimple_stmt (dump_file, last_stmt (exit->src), 0, TDF_SLIM);
7696 fprintf (dump_file, "\n");
7697 }
7698
7699 fprintf (dump_file, "\n");
7700 }
7701
7702 body = get_loop_body (loop);
7703 data->body_includes_call = loop_body_includes_call (body, loop->num_nodes);
7704 renumber_gimple_stmt_uids_in_blocks (body, loop->num_nodes);
7705 free (body);
7706
7707 data->loop_single_exit_p = exit != NULL && loop_only_exit_p (loop, exit);
7708
7709 /* For each ssa name, determine whether it behaves as an induction variable
7710 in some loop. */
7711 if (!find_induction_variables (data))
7712 goto finish;
7713
7714 /* Finds interesting uses (item 1). */
7715 find_interesting_uses (data);
7716 group_address_uses (data);
7717 if (n_iv_uses (data) > MAX_CONSIDERED_USES)
7718 goto finish;
7719
7720 /* Finds candidates for the induction variables (item 2). */
7721 find_iv_candidates (data);
7722
7723 /* Calculates the costs (item 3, part 1). */
7724 determine_iv_costs (data);
7725 determine_use_iv_costs (data);
7726 determine_set_costs (data);
7727
7728 /* Find the optimal set of induction variables (item 3, part 2). */
7729 iv_ca = find_optimal_iv_set (data);
7730 if (!iv_ca)
7731 goto finish;
7732 changed = true;
7733
7734 /* Create the new induction variables (item 4, part 1). */
7735 create_new_ivs (data, iv_ca);
7736 iv_ca_free (&iv_ca);
7737
7738 /* Rewrite the uses (item 4, part 2). */
7739 rewrite_uses (data);
7740
7741 /* Remove the ivs that are unused after rewriting. */
7742 remove_unused_ivs (data);
7743
7744 /* We have changed the structure of induction variables; it might happen
7745 that definitions in the scev database refer to some of them that were
7746 eliminated. */
7747 scev_reset ();
7748
7749 finish:
7750 free_loop_data (data);
7751
7752 return changed;
7753 }
7754
7755 /* Main entry point. Optimizes induction variables in loops. */
7756
7757 void
7758 tree_ssa_iv_optimize (void)
7759 {
7760 struct loop *loop;
7761 struct ivopts_data data;
7762
7763 tree_ssa_iv_optimize_init (&data);
7764
7765 /* Optimize the loops starting with the innermost ones. */
7766 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
7767 {
7768 if (dump_file && (dump_flags & TDF_DETAILS))
7769 flow_loop_dump (loop, dump_file, NULL, 1);
7770
7771 tree_ssa_iv_optimize_loop (&data, loop);
7772 }
7773
7774 tree_ssa_iv_optimize_finalize (&data);
7775 }