1 /* RTL-level loop invariant motion.
2 Copyright (C) 2004-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This implements the loop invariant motion pass. It is very simple
21 (no calls, no loads/stores, etc.). This should be sufficient to clean up
22 things like address arithmetic -- other more complicated invariants should
23 be eliminated on GIMPLE either in tree-ssa-loop-im.c or in tree-ssa-pre.c.
24
25 We proceed loop by loop -- it is simpler than trying to handle things
26 globally and should not lose much. First we inspect all sets inside loop
27 and create a dependency graph on insns (saying "to move this insn, you must
28 also move the following insns").
29
30 We then need to determine what to move. We estimate the number of registers
31 used and move as many invariants as possible while we still have enough free
32 registers. We prefer the expensive invariants.
33
34 Then we move the selected invariants out of the loop, creating new
35 temporaries for them if necessary. */
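
/* For instance (purely illustrative, written at the source level for
   clarity), in

       for (i = 0; i < n; i++)
         a[i] = x + y;

   the computation of "x + y" is loop invariant; after this pass the RTL
   behaves as if the source had been

       tmp = x + y;
       for (i = 0; i < n; i++)
         a[i] = tmp;

   with the invariant computed once in the loop preheader.  */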
36
37 #include "config.h"
38 #include "system.h"
39 #include "coretypes.h"
40 #include "backend.h"
41 #include "target.h"
42 #include "rtl.h"
43 #include "tree.h"
44 #include "cfghooks.h"
45 #include "df.h"
46 #include "memmodel.h"
47 #include "tm_p.h"
48 #include "insn-config.h"
49 #include "regs.h"
50 #include "ira.h"
51 #include "recog.h"
52 #include "cfgrtl.h"
53 #include "cfgloop.h"
54 #include "expr.h"
55 #include "params.h"
56 #include "rtl-iter.h"
57 #include "dumpfile.h"
58
59 /* The data stored for the loop. */
60
61 struct loop_data
62 {
63 struct loop *outermost_exit; /* The outermost exit of the loop. */
64 bool has_call; /* True if the loop contains a call. */
65 /* Maximal register pressure inside loop for given register class
66 (defined only for the pressure classes). */
67 int max_reg_pressure[N_REG_CLASSES];
68 /* Loop regs referenced and live pseudo-registers. */
69 bitmap_head regs_ref;
70 bitmap_head regs_live;
71 };
72
73 #define LOOP_DATA(LOOP) ((struct loop_data *) (LOOP)->aux)
74
75 /* The description of a use. */
76
77 struct use
78 {
79 rtx *pos; /* Position of the use. */
80 rtx_insn *insn; /* The insn in which the use occurs. */
81 unsigned addr_use_p; /* Whether the use occurs in an address. */
82 struct use *next; /* Next use in the list. */
83 };
84
85 /* The description of a def. */
86
87 struct def
88 {
89 struct use *uses; /* The list of uses that are uniquely reached
90 by it. */
91 unsigned n_uses; /* Number of such uses. */
92 unsigned n_addr_uses; /* Number of uses in addresses. */
93 unsigned invno; /* The corresponding invariant. */
94 bool can_prop_to_addr_uses; /* True if the corresponding inv can be
95 propagated into its address uses. */
96 };
97
98 /* The data stored for each invariant. */
99
100 struct invariant
101 {
102 /* The number of the invariant. */
103 unsigned invno;
104
105 /* The number of the invariant with the same value. */
106 unsigned eqto;
107
108 /* The number of invariants equivalent to this one (whose eqto is this invariant's number). */
109 unsigned eqno;
110
111 /* If we moved the invariant out of the loop, the original regno
112 that contained its value. */
113 int orig_regno;
114
115 /* If we moved the invariant out of the loop, the register that contains its
116 value. */
117 rtx reg;
118
119 /* The definition of the invariant. */
120 struct def *def;
121
122 /* The insn in which it is defined. */
123 rtx_insn *insn;
124
125 /* Whether it is always executed. */
126 bool always_executed;
127
128 /* Whether to move the invariant. */
129 bool move;
130
131 /* Whether the invariant is cheap when used as an address. */
132 bool cheap_address;
133
134 /* Cost of the invariant. */
135 unsigned cost;
136
137 /* Used for detecting already visited invariants while determining
138 the costs of movements. */
139 unsigned stamp;
140
141 /* The invariants it depends on. */
142 bitmap depends_on;
143 };
144
145 /* Currently processed loop. */
146 static struct loop *curr_loop;
147
148 /* Table of invariants indexed by the df_ref uid field. */
149
150 static unsigned int invariant_table_size = 0;
151 static struct invariant ** invariant_table;
152
153 /* Entry for hash table of invariant expressions. */
154
155 struct invariant_expr_entry
156 {
157 /* The invariant. */
158 struct invariant *inv;
159
160 /* Its value. */
161 rtx expr;
162
163 /* Its mode. */
164 machine_mode mode;
165
166 /* Its hash. */
167 hashval_t hash;
168 };
169
170 /* The actual stamp for marking already visited invariants while
171 determining the costs of movements. */
172
173 static unsigned actual_stamp;
174
175 typedef struct invariant *invariant_p;
176
177
178 /* The invariants. */
179
180 static vec<invariant_p> invariants;
181
182 /* Check the size of the invariant table and realloc if necessary. */
183
184 static void
185 check_invariant_table_size (void)
186 {
187 if (invariant_table_size < DF_DEFS_TABLE_SIZE ())
188 {
189 unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
190 invariant_table = XRESIZEVEC (struct invariant *, invariant_table, new_size);
191 memset (&invariant_table[invariant_table_size], 0,
192 (new_size - invariant_table_size) * sizeof (struct invariant *));
193 invariant_table_size = new_size;
194 }
195 }
196
197 /* Test whether it is possible for X to be invariant. */
198
199 static bool
200 check_maybe_invariant (rtx x)
201 {
202 enum rtx_code code = GET_CODE (x);
203 int i, j;
204 const char *fmt;
205
206 switch (code)
207 {
208 CASE_CONST_ANY:
209 case SYMBOL_REF:
210 case CONST:
211 case LABEL_REF:
212 return true;
213
214 case PC:
215 case CC0:
216 case UNSPEC_VOLATILE:
217 case CALL:
218 return false;
219
220 case REG:
221 return true;
222
223 case MEM:
224 /* Load/store motion is done elsewhere. ??? Perhaps also add it here?
225 It should not be hard, and might be faster than "elsewhere". */
226
227 /* Just handle the most trivial case where we load from an unchanging
228 location (most importantly, pic tables). */
229 if (MEM_READONLY_P (x) && !MEM_VOLATILE_P (x))
230 break;
231
232 return false;
233
234 case ASM_OPERANDS:
235 /* Don't mess with insns declared volatile. */
236 if (MEM_VOLATILE_P (x))
237 return false;
238 break;
239
240 default:
241 break;
242 }
243
244 fmt = GET_RTX_FORMAT (code);
245 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
246 {
247 if (fmt[i] == 'e')
248 {
249 if (!check_maybe_invariant (XEXP (x, i)))
250 return false;
251 }
252 else if (fmt[i] == 'E')
253 {
254 for (j = 0; j < XVECLEN (x, i); j++)
255 if (!check_maybe_invariant (XVECEXP (x, i, j)))
256 return false;
257 }
258 }
259
260 return true;
261 }
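
/* For example, an expression such as (plus:SI (reg:SI 100) (const_int 8))
   may be invariant -- whether it actually is depends on the definitions
   that reach the use of register 100 -- while a volatile memory reference,
   a CALL or an UNSPEC_VOLATILE never is.  */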
262
263 /* Returns the invariant definition for USE, or NULL if USE is not
264 invariant. */
265
266 static struct invariant *
267 invariant_for_use (df_ref use)
268 {
269 struct df_link *defs;
270 df_ref def;
271 basic_block bb = DF_REF_BB (use), def_bb;
272
273 if (DF_REF_FLAGS (use) & DF_REF_READ_WRITE)
274 return NULL;
275
276 defs = DF_REF_CHAIN (use);
277 if (!defs || defs->next)
278 return NULL;
279 def = defs->ref;
280 check_invariant_table_size ();
281 if (!invariant_table[DF_REF_ID (def)])
282 return NULL;
283
284 def_bb = DF_REF_BB (def);
285 if (!dominated_by_p (CDI_DOMINATORS, bb, def_bb))
286 return NULL;
287 return invariant_table[DF_REF_ID (def)];
288 }
289
290 /* Computes hash value for invariant expression X in INSN. */
291
292 static hashval_t
293 hash_invariant_expr_1 (rtx_insn *insn, rtx x)
294 {
295 enum rtx_code code = GET_CODE (x);
296 int i, j;
297 const char *fmt;
298 hashval_t val = code;
299 int do_not_record_p;
300 df_ref use;
301 struct invariant *inv;
302
303 switch (code)
304 {
305 CASE_CONST_ANY:
306 case SYMBOL_REF:
307 case CONST:
308 case LABEL_REF:
309 return hash_rtx (x, GET_MODE (x), &do_not_record_p, NULL, false);
310
311 case REG:
312 use = df_find_use (insn, x);
313 if (!use)
314 return hash_rtx (x, GET_MODE (x), &do_not_record_p, NULL, false);
315 inv = invariant_for_use (use);
316 if (!inv)
317 return hash_rtx (x, GET_MODE (x), &do_not_record_p, NULL, false);
318
319 gcc_assert (inv->eqto != ~0u);
320 return inv->eqto;
321
322 default:
323 break;
324 }
325
326 fmt = GET_RTX_FORMAT (code);
327 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
328 {
329 if (fmt[i] == 'e')
330 val ^= hash_invariant_expr_1 (insn, XEXP (x, i));
331 else if (fmt[i] == 'E')
332 {
333 for (j = 0; j < XVECLEN (x, i); j++)
334 val ^= hash_invariant_expr_1 (insn, XVECEXP (x, i, j));
335 }
336 else if (fmt[i] == 'i' || fmt[i] == 'n')
337 val ^= XINT (x, i);
338 else if (fmt[i] == 'p')
339 val ^= constant_lower_bound (SUBREG_BYTE (x));
340 }
341
342 return val;
343 }
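
/* As an illustration: if pseudos r1 and r2 are defined by invariants that
   were already recorded as equivalent, the sources of "r5 = r1 + 8" and
   "r6 = r2 + 8" hash identically, because each REG contributes the eqto
   number of its invariant rather than its register number.  */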
344
345 /* Returns true if the invariant expressions E1 and E2 used in insns INSN1
346 and INSN2 always have the same value. */
347
348 static bool
349 invariant_expr_equal_p (rtx_insn *insn1, rtx e1, rtx_insn *insn2, rtx e2)
350 {
351 enum rtx_code code = GET_CODE (e1);
352 int i, j;
353 const char *fmt;
354 df_ref use1, use2;
355 struct invariant *inv1 = NULL, *inv2 = NULL;
356 rtx sub1, sub2;
357
358 /* If mode of only one of the operands is VOIDmode, it is not equivalent to
359 the other one. If both are VOIDmode, we rely on the caller of this
360 function to verify that their modes are the same. */
361 if (code != GET_CODE (e2) || GET_MODE (e1) != GET_MODE (e2))
362 return false;
363
364 switch (code)
365 {
366 CASE_CONST_ANY:
367 case SYMBOL_REF:
368 case CONST:
369 case LABEL_REF:
370 return rtx_equal_p (e1, e2);
371
372 case REG:
373 use1 = df_find_use (insn1, e1);
374 use2 = df_find_use (insn2, e2);
375 if (use1)
376 inv1 = invariant_for_use (use1);
377 if (use2)
378 inv2 = invariant_for_use (use2);
379
380 if (!inv1 && !inv2)
381 return rtx_equal_p (e1, e2);
382
383 if (!inv1 || !inv2)
384 return false;
385
386 gcc_assert (inv1->eqto != ~0u);
387 gcc_assert (inv2->eqto != ~0u);
388 return inv1->eqto == inv2->eqto;
389
390 default:
391 break;
392 }
393
394 fmt = GET_RTX_FORMAT (code);
395 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
396 {
397 if (fmt[i] == 'e')
398 {
399 sub1 = XEXP (e1, i);
400 sub2 = XEXP (e2, i);
401
402 if (!invariant_expr_equal_p (insn1, sub1, insn2, sub2))
403 return false;
404 }
405
406 else if (fmt[i] == 'E')
407 {
408 if (XVECLEN (e1, i) != XVECLEN (e2, i))
409 return false;
410
411 for (j = 0; j < XVECLEN (e1, i); j++)
412 {
413 sub1 = XVECEXP (e1, i, j);
414 sub2 = XVECEXP (e2, i, j);
415
416 if (!invariant_expr_equal_p (insn1, sub1, insn2, sub2))
417 return false;
418 }
419 }
420 else if (fmt[i] == 'i' || fmt[i] == 'n')
421 {
422 if (XINT (e1, i) != XINT (e2, i))
423 return false;
424 }
425 else if (fmt[i] == 'p')
426 {
427 if (maybe_ne (SUBREG_BYTE (e1), SUBREG_BYTE (e2)))
428 return false;
429 }
430 /* Unhandled type of subexpression; we fail conservatively. */
431 else
432 return false;
433 }
434
435 return true;
436 }
437
438 struct invariant_expr_hasher : free_ptr_hash <invariant_expr_entry>
439 {
440 static inline hashval_t hash (const invariant_expr_entry *);
441 static inline bool equal (const invariant_expr_entry *,
442 const invariant_expr_entry *);
443 };
444
445 /* Returns hash value for invariant expression entry ENTRY. */
446
447 inline hashval_t
448 invariant_expr_hasher::hash (const invariant_expr_entry *entry)
449 {
450 return entry->hash;
451 }
452
453 /* Compares invariant expression entries ENTRY1 and ENTRY2. */
454
455 inline bool
456 invariant_expr_hasher::equal (const invariant_expr_entry *entry1,
457 const invariant_expr_entry *entry2)
458 {
459 if (entry1->mode != entry2->mode)
460 return false;
461
462 return invariant_expr_equal_p (entry1->inv->insn, entry1->expr,
463 entry2->inv->insn, entry2->expr);
464 }
465
466 typedef hash_table<invariant_expr_hasher> invariant_htab_type;
467
468 /* Checks whether invariant with value EXPR in machine mode MODE is
469 recorded in EQ. If this is the case, return the invariant. Otherwise
470 insert INV into the table for this expression and return INV. */
471
472 static struct invariant *
473 find_or_insert_inv (invariant_htab_type *eq, rtx expr, machine_mode mode,
474 struct invariant *inv)
475 {
476 hashval_t hash = hash_invariant_expr_1 (inv->insn, expr);
477 struct invariant_expr_entry *entry;
478 struct invariant_expr_entry pentry;
479 invariant_expr_entry **slot;
480
481 pentry.expr = expr;
482 pentry.inv = inv;
483 pentry.mode = mode;
484 slot = eq->find_slot_with_hash (&pentry, hash, INSERT);
485 entry = *slot;
486
487 if (entry)
488 return entry->inv;
489
490 entry = XNEW (struct invariant_expr_entry);
491 entry->inv = inv;
492 entry->expr = expr;
493 entry->mode = mode;
494 entry->hash = hash;
495 *slot = entry;
496
497 return inv;
498 }
499
500 /* Finds invariants identical to INV and records the equivalence. EQ is the
501 hash table of the invariants. */
502
503 static void
504 find_identical_invariants (invariant_htab_type *eq, struct invariant *inv)
505 {
506 unsigned depno;
507 bitmap_iterator bi;
508 struct invariant *dep;
509 rtx expr, set;
510 machine_mode mode;
511 struct invariant *tmp;
512
513 if (inv->eqto != ~0u)
514 return;
515
516 EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, depno, bi)
517 {
518 dep = invariants[depno];
519 find_identical_invariants (eq, dep);
520 }
521
522 set = single_set (inv->insn);
523 expr = SET_SRC (set);
524 mode = GET_MODE (expr);
525 if (mode == VOIDmode)
526 mode = GET_MODE (SET_DEST (set));
527
528 tmp = find_or_insert_inv (eq, expr, mode, inv);
529 inv->eqto = tmp->invno;
530
531 if (tmp->invno != inv->invno && inv->always_executed)
532 tmp->eqno++;
533
534 if (dump_file && inv->eqto != inv->invno)
535 fprintf (dump_file,
536 "Invariant %d is equivalent to invariant %d.\n",
537 inv->invno, inv->eqto);
538 }
539
540 /* Find invariants with the same value and record the equivalences. */
541
542 static void
543 merge_identical_invariants (void)
544 {
545 unsigned i;
546 struct invariant *inv;
547 invariant_htab_type eq (invariants.length ());
548
549 FOR_EACH_VEC_ELT (invariants, i, inv)
550 find_identical_invariants (&eq, inv);
551 }
552
553 /* Determines the basic blocks inside LOOP that are always executed and
554 stores their bitmap to ALWAYS_REACHED. MAY_EXIT is a bitmap of
555 basic blocks that may either exit the loop, or contain a call that
556 does not have to return. BODY is the body of the loop obtained by
557 get_loop_body_in_dom_order. */
558
559 static void
560 compute_always_reached (struct loop *loop, basic_block *body,
561 bitmap may_exit, bitmap always_reached)
562 {
563 unsigned i;
564
565 for (i = 0; i < loop->num_nodes; i++)
566 {
567 if (dominated_by_p (CDI_DOMINATORS, loop->latch, body[i]))
568 bitmap_set_bit (always_reached, i);
569
570 if (bitmap_bit_p (may_exit, i))
571 return;
572 }
573 }
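
/* E.g. for a loop of the shape

       header: if (c) goto exit;
       body:   ...
       latch:  goto header;

   only the header is marked as always reached: it dominates the latch, and
   the scan stops there because the header is also an exit block.
   (Illustration only; indices refer to BODY's dominance order.)  */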
574
575 /* Finds exits out of the LOOP with body BODY. Marks in HAS_EXIT and MAY_EXIT
576 the blocks from which we may exit the loop via a cfg edge. In MAY_EXIT,
577 additionally mark blocks that may exit due to a call. */
578
579 static void
580 find_exits (struct loop *loop, basic_block *body,
581 bitmap may_exit, bitmap has_exit)
582 {
583 unsigned i;
584 edge_iterator ei;
585 edge e;
586 struct loop *outermost_exit = loop, *aexit;
587 bool has_call = false;
588 rtx_insn *insn;
589
590 for (i = 0; i < loop->num_nodes; i++)
591 {
592 if (body[i]->loop_father == loop)
593 {
594 FOR_BB_INSNS (body[i], insn)
595 {
596 if (CALL_P (insn)
597 && (RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)
598 || !RTL_CONST_OR_PURE_CALL_P (insn)))
599 {
600 has_call = true;
601 bitmap_set_bit (may_exit, i);
602 break;
603 }
604 }
605
606 FOR_EACH_EDGE (e, ei, body[i]->succs)
607 {
608 if (! flow_bb_inside_loop_p (loop, e->dest))
609 {
610 bitmap_set_bit (may_exit, i);
611 bitmap_set_bit (has_exit, i);
612 outermost_exit = find_common_loop (outermost_exit,
613 e->dest->loop_father);
614 }
615 /* If we enter a subloop that might never terminate, treat
616 it like a possible exit. */
617 if (flow_loop_nested_p (loop, e->dest->loop_father))
618 bitmap_set_bit (may_exit, i);
619 }
620 continue;
621 }
622
623 /* Use the data stored for the subloop to decide whether we may exit
624 through it. It is sufficient to do this for the header of the loop,
625 as other basic blocks inside it must be dominated by it. */
626 if (body[i]->loop_father->header != body[i])
627 continue;
628
629 if (LOOP_DATA (body[i]->loop_father)->has_call)
630 {
631 has_call = true;
632 bitmap_set_bit (may_exit, i);
633 }
634 aexit = LOOP_DATA (body[i]->loop_father)->outermost_exit;
635 if (aexit != loop)
636 {
637 bitmap_set_bit (may_exit, i);
638 bitmap_set_bit (has_exit, i);
639
640 if (flow_loop_nested_p (aexit, outermost_exit))
641 outermost_exit = aexit;
642 }
643 }
644
645 if (loop->aux == NULL)
646 {
647 loop->aux = xcalloc (1, sizeof (struct loop_data));
648 bitmap_initialize (&LOOP_DATA (loop)->regs_ref, &reg_obstack);
649 bitmap_initialize (&LOOP_DATA (loop)->regs_live, &reg_obstack);
650 }
651 LOOP_DATA (loop)->outermost_exit = outermost_exit;
652 LOOP_DATA (loop)->has_call = has_call;
653 }
654
655 /* Check whether we may assign a value to X from a register. */
656
657 static bool
658 may_assign_reg_p (rtx x)
659 {
660 return (GET_MODE (x) != VOIDmode
661 && GET_MODE (x) != BLKmode
662 && can_copy_p (GET_MODE (x))
663 /* Do not mess with the frame pointer adjustments that can
664 be generated e.g. by expand_builtin_setjmp_receiver. */
665 && x != frame_pointer_rtx
666 && (!REG_P (x)
667 || !HARD_REGISTER_P (x)
668 || REGNO_REG_CLASS (REGNO (x)) != NO_REGS));
669 }
670
671 /* Finds definitions that may correspond to invariants in LOOP, using
672 use-def chains computed by df over the loop body. */
673
674 static void
675 find_defs (struct loop *loop)
676 {
677 if (dump_file)
678 {
679 fprintf (dump_file,
680 "*****starting processing of loop %d ******\n",
681 loop->num);
682 }
683
684 df_chain_add_problem (DF_UD_CHAIN);
685 df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
686 df_analyze_loop (loop);
687 check_invariant_table_size ();
688
689 if (dump_file)
690 {
691 df_dump_region (dump_file);
692 fprintf (dump_file,
693 "*****ending processing of loop %d ******\n",
694 loop->num);
695 }
696 }
697
698 /* Creates a new invariant for definition DEF in INSN, depending on invariants
699 in DEPENDS_ON. ALWAYS_EXECUTED is true if the insn is always executed,
700 unless the program ends due to a function call. The newly created invariant
701 is returned. */
702
703 static struct invariant *
704 create_new_invariant (struct def *def, rtx_insn *insn, bitmap depends_on,
705 bool always_executed)
706 {
707 struct invariant *inv = XNEW (struct invariant);
708 rtx set = single_set (insn);
709 bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
710
711 inv->def = def;
712 inv->always_executed = always_executed;
713 inv->depends_on = depends_on;
714
715 /* If the set is simple, usually by moving it we move the whole store out of
716 the loop. Otherwise we save only the cost of the computation. */
717 if (def)
718 {
719 inv->cost = set_rtx_cost (set, speed);
720 /* ??? Try to determine cheapness of address computation. Unfortunately
721 the address cost is only a relative measure, we can't really compare
722 it with any absolute number, but only with other address costs.
723 But here we don't have any other addresses, so compare with a magic
724 number anyway. It has to be large enough to not regress PR33928
725 (by avoiding to move reg+8,reg+16,reg+24 invariants), but small
726 enough to not regress 410.bwaves either (by still moving reg+reg
727 invariants).
728 See http://gcc.gnu.org/ml/gcc-patches/2009-10/msg01210.html . */
729 if (SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set))))
730 inv->cheap_address = address_cost (SET_SRC (set), word_mode,
731 ADDR_SPACE_GENERIC, speed) < 3;
732 else
733 inv->cheap_address = false;
734 }
735 else
736 {
737 inv->cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)),
738 speed);
739 inv->cheap_address = false;
740 }
741
742 inv->move = false;
743 inv->reg = NULL_RTX;
744 inv->orig_regno = -1;
745 inv->stamp = 0;
746 inv->insn = insn;
747
748 inv->invno = invariants.length ();
749 inv->eqto = ~0u;
750
751 /* Itself. */
752 inv->eqno = 1;
753
754 if (def)
755 def->invno = inv->invno;
756 invariants.safe_push (inv);
757
758 if (dump_file)
759 {
760 fprintf (dump_file,
761 "Set in insn %d is invariant (%d), cost %d, depends on ",
762 INSN_UID (insn), inv->invno, inv->cost);
763 dump_bitmap (dump_file, inv->depends_on);
764 }
765
766 return inv;
767 }
768
769 /* Return a canonical version of X for the address, from the point of view
770 that all multiplications are represented as MULT instead of a multiply
771 by a power of 2 being represented as ASHIFT.
772
773 Callers should prepare a copy of X because this function may modify it
774 in place. */
775
776 static void
777 canonicalize_address_mult (rtx x)
778 {
779 subrtx_var_iterator::array_type array;
780 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
781 {
782 rtx sub = *iter;
783 scalar_int_mode sub_mode;
784 if (is_a <scalar_int_mode> (GET_MODE (sub), &sub_mode)
785 && GET_CODE (sub) == ASHIFT
786 && CONST_INT_P (XEXP (sub, 1))
787 && INTVAL (XEXP (sub, 1)) < GET_MODE_BITSIZE (sub_mode)
788 && INTVAL (XEXP (sub, 1)) >= 0)
789 {
790 HOST_WIDE_INT shift = INTVAL (XEXP (sub, 1));
791 PUT_CODE (sub, MULT);
792 XEXP (sub, 1) = gen_int_mode (HOST_WIDE_INT_1 << shift, sub_mode);
793 iter.skip_subrtxes ();
794 }
795 }
796 }
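
/* For instance, (ashift:SI (reg:SI 100) (const_int 2)) is rewritten in
   place into the equivalent (mult:SI (reg:SI 100) (const_int 4)).  */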
797
798 /* Maximum number of sub expressions in an address. We set it to
799 a small integer since it's unlikely to have a complicated
800 address expression. */
801
802 #define MAX_CANON_ADDR_PARTS (5)
803
804 /* Collect sub expressions in address X with PLUS as the separator.
805 Sub expressions are stored in vector ADDR_PARTS. */
806
807 static void
808 collect_address_parts (rtx x, vec<rtx> *addr_parts)
809 {
810 subrtx_var_iterator::array_type array;
811 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
812 {
813 rtx sub = *iter;
814
815 if (GET_CODE (sub) != PLUS)
816 {
817 addr_parts->safe_push (sub);
818 iter.skip_subrtxes ();
819 }
820 }
821 }
822
823 /* Compare function for sorting sub expressions X and Y based on
824 precedence defined for commutative operations. */
825
826 static int
827 compare_address_parts (const void *x, const void *y)
828 {
829 const rtx *rx = (const rtx *)x;
830 const rtx *ry = (const rtx *)y;
831 int px = commutative_operand_precedence (*rx);
832 int py = commutative_operand_precedence (*ry);
833
834 return (py - px);
835 }
836
837 /* Return a canonical version of address X by the following steps:
838 1) Rewrite ASHIFT into MULT recursively.
839 2) Divide address into sub expressions with PLUS as the
840 separator.
841 3) Sort sub expressions according to precedence defined
842 for commutative operations.
843 4) Simplify CONST_INT_P sub expressions.
844 5) Create new canonicalized address and return.
845 Callers should prepare a copy of X because this function may
846 modify it in place. */
847
848 static rtx
849 canonicalize_address (rtx x)
850 {
851 rtx res;
852 unsigned int i, j;
853 machine_mode mode = GET_MODE (x);
854 auto_vec<rtx, MAX_CANON_ADDR_PARTS> addr_parts;
855
856 /* Rewrite ASHIFT into MULT. */
857 canonicalize_address_mult (x);
858 /* Divide address into sub expressions. */
859 collect_address_parts (x, &addr_parts);
860 /* Unlikely to have very complicated address. */
861 if (addr_parts.length () < 2
862 || addr_parts.length () > MAX_CANON_ADDR_PARTS)
863 return x;
864
865 /* Sort sub expressions according to canonicalization precedence. */
866 addr_parts.qsort (compare_address_parts);
867
868 /* Fold all CONST_INT sub expressions into a single constant if possible. */
869 for (i = 0; i < addr_parts.length (); i++)
870 if (CONST_INT_P (addr_parts[i]))
871 break;
872
873 for (j = i + 1; j < addr_parts.length (); j++)
874 {
875 gcc_assert (CONST_INT_P (addr_parts[j]));
876 addr_parts[i] = simplify_gen_binary (PLUS, mode,
877 addr_parts[i],
878 addr_parts[j]);
879 }
880
881 /* Chain PLUS operators to the left for !CONST_INT_P sub expressions. */
882 res = addr_parts[0];
883 for (j = 1; j < i; j++)
884 res = simplify_gen_binary (PLUS, mode, res, addr_parts[j]);
885
886 /* Pick up the folded CONST_INT_P sub expression, if any. */
887 if (i < addr_parts.length ())
888 res = simplify_gen_binary (PLUS, mode, res, addr_parts[i]);
889
890 return res;
891 }
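
/* Schematically (an illustrative example), an address such as

       (plus:SI (const_int 4)
                (plus:SI (reg:SI 101)
                         (ashift:SI (reg:SI 100) (const_int 2))))

   is canonicalized into

       (plus:SI (plus:SI (mult:SI (reg:SI 100) (const_int 4))
                         (reg:SI 101))
                (const_int 4))

   with the MULT term first (complex expressions have the highest
   commutative-operand precedence), PLUS chained to the left, and the
   constant last.  */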
892
893 /* Given invariant DEF and its address USE, check if the corresponding
894 invariant expr can be propagated into the use or not. */
895
896 static bool
897 inv_can_prop_to_addr_use (struct def *def, df_ref use)
898 {
899 struct invariant *inv;
900 rtx *pos = DF_REF_REAL_LOC (use), def_set, use_set;
901 rtx_insn *use_insn = DF_REF_INSN (use);
902 rtx_insn *def_insn;
903 bool ok;
904
905 inv = invariants[def->invno];
906 /* No need to check further if the address expression is expensive. */
907 if (!inv->cheap_address)
908 return false;
909
910 def_insn = inv->insn;
911 def_set = single_set (def_insn);
912 if (!def_set)
913 return false;
914
915 validate_unshare_change (use_insn, pos, SET_SRC (def_set), true);
916 ok = verify_changes (0);
917 /* Try harder by canonicalizing the address expression. */
918 if (!ok && (use_set = single_set (use_insn)) != NULL_RTX)
919 {
920 rtx src, dest, mem = NULL_RTX;
921
922 src = SET_SRC (use_set);
923 dest = SET_DEST (use_set);
924 if (MEM_P (src))
925 mem = src;
926 else if (MEM_P (dest))
927 mem = dest;
928
929 if (mem != NULL_RTX
930 && !memory_address_addr_space_p (GET_MODE (mem),
931 XEXP (mem, 0),
932 MEM_ADDR_SPACE (mem)))
933 {
934 rtx addr = canonicalize_address (copy_rtx (XEXP (mem, 0)));
935 if (memory_address_addr_space_p (GET_MODE (mem),
936 addr, MEM_ADDR_SPACE (mem)))
937 ok = true;
938 }
939 }
940 cancel_changes (0);
941 return ok;
942 }
943
944 /* Record USE at DEF. */
945
946 static void
947 record_use (struct def *def, df_ref use)
948 {
949 struct use *u = XNEW (struct use);
950
951 u->pos = DF_REF_REAL_LOC (use);
952 u->insn = DF_REF_INSN (use);
953 u->addr_use_p = (DF_REF_TYPE (use) == DF_REF_REG_MEM_LOAD
954 || DF_REF_TYPE (use) == DF_REF_REG_MEM_STORE);
955 u->next = def->uses;
956 def->uses = u;
957 def->n_uses++;
958 if (u->addr_use_p)
959 {
960 /* Initialize propagation information if this is the first addr
961 use of the inv def. */
962 if (def->n_addr_uses == 0)
963 def->can_prop_to_addr_uses = true;
964
965 def->n_addr_uses++;
966 if (def->can_prop_to_addr_uses && !inv_can_prop_to_addr_use (def, use))
967 def->can_prop_to_addr_uses = false;
968 }
969 }
970
971 /* Finds the invariants USE depends on and store them to the DEPENDS_ON
972 bitmap. Returns true if all dependencies of USE are known to be
973 loop invariants, false otherwise. */
974
975 static bool
976 check_dependency (basic_block bb, df_ref use, bitmap depends_on)
977 {
978 df_ref def;
979 basic_block def_bb;
980 struct df_link *defs;
981 struct def *def_data;
982 struct invariant *inv;
983
984 if (DF_REF_FLAGS (use) & DF_REF_READ_WRITE)
985 return false;
986
987 defs = DF_REF_CHAIN (use);
988 if (!defs)
989 {
990 unsigned int regno = DF_REF_REGNO (use);
991
992 /* If this is the use of an uninitialized argument register that is
993 likely to be spilled, do not move it lest this might extend its
994 lifetime and cause reload to die. This can occur for a call to
995 a function taking complex number arguments and moving the insns
996 preparing the arguments without moving the call itself wouldn't
997 gain much in practice. */
998 if ((DF_REF_FLAGS (use) & DF_HARD_REG_LIVE)
999 && FUNCTION_ARG_REGNO_P (regno)
1000 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno)))
1001 return false;
1002
1003 return true;
1004 }
1005
1006 if (defs->next)
1007 return false;
1008
1009 def = defs->ref;
1010 check_invariant_table_size ();
1011 inv = invariant_table[DF_REF_ID (def)];
1012 if (!inv)
1013 return false;
1014
1015 def_data = inv->def;
1016 gcc_assert (def_data != NULL);
1017
1018 def_bb = DF_REF_BB (def);
1019 /* Note that in case bb == def_bb, we know that the definition
1020 dominates insn, because def has invariant_table[DF_REF_ID(def)]
1021 defined and we process the insns in the basic block bb
1022 sequentially. */
1023 if (!dominated_by_p (CDI_DOMINATORS, bb, def_bb))
1024 return false;
1025
1026 bitmap_set_bit (depends_on, def_data->invno);
1027 return true;
1028 }
1029
1030
1031 /* Finds the invariants INSN depends on and store them to the DEPENDS_ON
1032 bitmap. Returns true if all dependencies of INSN are known to be
1033 loop invariants, false otherwise. */
1034
1035 static bool
1036 check_dependencies (rtx_insn *insn, bitmap depends_on)
1037 {
1038 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
1039 df_ref use;
1040 basic_block bb = BLOCK_FOR_INSN (insn);
1041
1042 FOR_EACH_INSN_INFO_USE (use, insn_info)
1043 if (!check_dependency (bb, use, depends_on))
1044 return false;
1045 FOR_EACH_INSN_INFO_EQ_USE (use, insn_info)
1046 if (!check_dependency (bb, use, depends_on))
1047 return false;
1048
1049 return true;
1050 }
1051
1052 /* Pre-check candidate DEST to skip those that cannot make a valid insn
1053 during move_invariant_reg. SIMPLE is false for hard registers, which are not checked here. */
1054 static bool
1055 pre_check_invariant_p (bool simple, rtx dest)
1056 {
1057 if (simple && REG_P (dest) && DF_REG_DEF_COUNT (REGNO (dest)) > 1)
1058 {
1059 df_ref use;
1060 unsigned int i = REGNO (dest);
1061 struct df_insn_info *insn_info;
1062 df_ref def_rec;
1063
1064 for (use = DF_REG_USE_CHAIN (i); use; use = DF_REF_NEXT_REG (use))
1065 {
1066 rtx_insn *ref = DF_REF_INSN (use);
1067 insn_info = DF_INSN_INFO_GET (ref);
1068
1069 FOR_EACH_INSN_INFO_DEF (def_rec, insn_info)
1070 if (DF_REF_REGNO (def_rec) == i)
1071 {
1072 /* Multiple definitions at this stage are most likely due to
1073 instruction constraints, which require both read and write
1074 on the same register. Since move_invariant_reg is not
1075 powerful enough to handle such cases, just ignore the INV
1076 and leave the chance to other passes. */
1077 return false;
1078 }
1079 }
1080 }
1081 return true;
1082 }
1083
1084 /* Finds invariant in INSN. ALWAYS_REACHED is true if the insn is always
1085 executed. ALWAYS_EXECUTED is true if the insn is always executed,
1086 unless the program ends due to a function call. */
1087
1088 static void
1089 find_invariant_insn (rtx_insn *insn, bool always_reached, bool always_executed)
1090 {
1091 df_ref ref;
1092 struct def *def;
1093 bitmap depends_on;
1094 rtx set, dest;
1095 bool simple = true;
1096 struct invariant *inv;
1097
1098 /* We can't move a CC0 setter without the user. */
1099 if (HAVE_cc0 && sets_cc0_p (insn))
1100 return;
1101
1102 set = single_set (insn);
1103 if (!set)
1104 return;
1105 dest = SET_DEST (set);
1106
1107 if (!REG_P (dest)
1108 || HARD_REGISTER_P (dest))
1109 simple = false;
1110
1111 if (!may_assign_reg_p (dest)
1112 || !pre_check_invariant_p (simple, dest)
1113 || !check_maybe_invariant (SET_SRC (set)))
1114 return;
1115
1116 /* If the insn can throw an exception, we cannot move it at all without
1117 changing the cfg. */
1118 if (can_throw_internal (insn))
1119 return;
1120
1121 /* We cannot make a trapping insn executed unless it was executed before. */
1122 if (may_trap_or_fault_p (PATTERN (insn)) && !always_reached)
1123 return;
1124
1125 depends_on = BITMAP_ALLOC (NULL);
1126 if (!check_dependencies (insn, depends_on))
1127 {
1128 BITMAP_FREE (depends_on);
1129 return;
1130 }
1131
1132 if (simple)
1133 def = XCNEW (struct def);
1134 else
1135 def = NULL;
1136
1137 inv = create_new_invariant (def, insn, depends_on, always_executed);
1138
1139 if (simple)
1140 {
1141 ref = df_find_def (insn, dest);
1142 check_invariant_table_size ();
1143 invariant_table[DF_REF_ID (ref)] = inv;
1144 }
1145 }
1146
1147 /* Record registers used in INSN that have a unique invariant definition. */
1148
1149 static void
1150 record_uses (rtx_insn *insn)
1151 {
1152 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
1153 df_ref use;
1154 struct invariant *inv;
1155
1156 FOR_EACH_INSN_INFO_USE (use, insn_info)
1157 {
1158 inv = invariant_for_use (use);
1159 if (inv)
1160 record_use (inv->def, use);
1161 }
1162 FOR_EACH_INSN_INFO_EQ_USE (use, insn_info)
1163 {
1164 inv = invariant_for_use (use);
1165 if (inv)
1166 record_use (inv->def, use);
1167 }
1168 }
1169
1170 /* Finds invariants in INSN. ALWAYS_REACHED is true if the insn is always
1171 executed. ALWAYS_EXECUTED is true if the insn is always executed,
1172 unless the program ends due to a function call. */
1173
1174 static void
1175 find_invariants_insn (rtx_insn *insn, bool always_reached, bool always_executed)
1176 {
1177 find_invariant_insn (insn, always_reached, always_executed);
1178 record_uses (insn);
1179 }
1180
1181 /* Finds invariants in basic block BB. ALWAYS_REACHED is true if the
1182 basic block is always executed. ALWAYS_EXECUTED is true if the basic
1183 block is always executed, unless the program ends due to a function
1184 call. */
1185
1186 static void
1187 find_invariants_bb (basic_block bb, bool always_reached, bool always_executed)
1188 {
1189 rtx_insn *insn;
1190
1191 FOR_BB_INSNS (bb, insn)
1192 {
1193 if (!NONDEBUG_INSN_P (insn))
1194 continue;
1195
1196 find_invariants_insn (insn, always_reached, always_executed);
1197
1198 if (always_reached
1199 && CALL_P (insn)
1200 && (RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)
1201 || ! RTL_CONST_OR_PURE_CALL_P (insn)))
1202 always_reached = false;
1203 }
1204 }
1205
1206 /* Finds invariants in LOOP with body BODY. ALWAYS_REACHED is the bitmap of
1207 basic blocks in BODY that are always executed. ALWAYS_EXECUTED is the
1208 bitmap of basic blocks in BODY that are always executed unless the program
1209 ends due to a function call. */
1210
1211 static void
1212 find_invariants_body (struct loop *loop, basic_block *body,
1213 bitmap always_reached, bitmap always_executed)
1214 {
1215 unsigned i;
1216
1217 for (i = 0; i < loop->num_nodes; i++)
1218 find_invariants_bb (body[i],
1219 bitmap_bit_p (always_reached, i),
1220 bitmap_bit_p (always_executed, i));
1221 }
1222
1223 /* Finds invariants in LOOP. */
1224
1225 static void
1226 find_invariants (struct loop *loop)
1227 {
1228 auto_bitmap may_exit;
1229 auto_bitmap always_reached;
1230 auto_bitmap has_exit;
1231 auto_bitmap always_executed;
1232 basic_block *body = get_loop_body_in_dom_order (loop);
1233
1234 find_exits (loop, body, may_exit, has_exit);
1235 compute_always_reached (loop, body, may_exit, always_reached);
1236 compute_always_reached (loop, body, has_exit, always_executed);
1237
1238 find_defs (loop);
1239 find_invariants_body (loop, body, always_reached, always_executed);
1240 merge_identical_invariants ();
1241
1242 free (body);
1243 }
1244
1245 /* Frees a list of uses USE. */
1246
1247 static void
1248 free_use_list (struct use *use)
1249 {
1250 struct use *next;
1251
1252 for (; use; use = next)
1253 {
1254 next = use->next;
1255 free (use);
1256 }
1257 }
1258
1259 /* Return pressure class and number of hard registers (through *NREGS)
1260 for destination of INSN. */
1261 static enum reg_class
1262 get_pressure_class_and_nregs (rtx_insn *insn, int *nregs)
1263 {
1264 rtx reg;
1265 enum reg_class pressure_class;
1266 rtx set = single_set (insn);
1267
1268 /* Considered invariant insns have only one set. */
1269 gcc_assert (set != NULL_RTX);
1270 reg = SET_DEST (set);
1271 if (GET_CODE (reg) == SUBREG)
1272 reg = SUBREG_REG (reg);
1273 if (MEM_P (reg))
1274 {
1275 *nregs = 0;
1276 pressure_class = NO_REGS;
1277 }
1278 else
1279 {
1280 if (! REG_P (reg))
1281 reg = NULL_RTX;
1282 if (reg == NULL_RTX)
1283 pressure_class = GENERAL_REGS;
1284 else
1285 {
1286 pressure_class = reg_allocno_class (REGNO (reg));
1287 pressure_class = ira_pressure_class_translate[pressure_class];
1288 }
1289 *nregs
1290 = ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
1291 }
1292 return pressure_class;
1293 }
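
/* For example, on a typical 32-bit target a DImode invariant whose
   destination is allocated to GENERAL_REGS needs two hard registers, so
   *NREGS would be 2; the exact value comes from ira_reg_class_max_nregs.  */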
1294
1295 /* Calculates cost and number of registers needed for moving invariant INV
1296 out of the loop and stores them to *COMP_COST and *REGS_NEEDED. *CL will be
1297 the REG_CLASS of INV. Return
1298 -1: if INV is invalid.
1299 0: if INV and its depends_on have the same reg_class.
1300 1: if INV and its depends_on have different reg_classes. */
1301
1302 static int
1303 get_inv_cost (struct invariant *inv, int *comp_cost, unsigned *regs_needed,
1304 enum reg_class *cl)
1305 {
1306 int i, acomp_cost;
1307 unsigned aregs_needed[N_REG_CLASSES];
1308 unsigned depno;
1309 struct invariant *dep;
1310 bitmap_iterator bi;
1311 int ret = 1;
1312
1313 /* Find the representative of the class of the equivalent invariants. */
1314 inv = invariants[inv->eqto];
1315
1316 *comp_cost = 0;
1317 if (! flag_ira_loop_pressure)
1318 regs_needed[0] = 0;
1319 else
1320 {
1321 for (i = 0; i < ira_pressure_classes_num; i++)
1322 regs_needed[ira_pressure_classes[i]] = 0;
1323 }
1324
1325 if (inv->move
1326 || inv->stamp == actual_stamp)
1327 return -1;
1328 inv->stamp = actual_stamp;
1329
1330 if (! flag_ira_loop_pressure)
1331 regs_needed[0]++;
1332 else
1333 {
1334 int nregs;
1335 enum reg_class pressure_class;
1336
1337 pressure_class = get_pressure_class_and_nregs (inv->insn, &nregs);
1338 regs_needed[pressure_class] += nregs;
1339 *cl = pressure_class;
1340 ret = 0;
1341 }
1342
1343 if (!inv->cheap_address
1344 || inv->def->n_uses == 0
1345 || inv->def->n_addr_uses < inv->def->n_uses
1346 /* Count cost if the inv can't be propagated into address uses. */
1347 || !inv->def->can_prop_to_addr_uses)
1348 (*comp_cost) += inv->cost * inv->eqno;
1349
1350 #ifdef STACK_REGS
1351 {
1352 /* Hoisting constant pool constants into stack regs may cost more than
1353 just single register. On x87, the balance is affected both by the
1354 small number of FP registers, and by its register stack organization,
1355 that forces us to add compensation code in and around the loop to
1356 shuffle the operands to the top of stack before use, and pop them
1357 from the stack after the loop finishes.
1358
1359 To model this effect, we increase the number of registers needed for
1360 stack registers by two: one register push, and one register pop.
1361 This usually has the effect that FP constant loads from the constant
1362 pool are not moved out of the loop.
1363
1364 Note that this also means that dependent invariants cannot be moved.
1365 However, the primary purpose of this pass is to move loop invariant
1366 address arithmetic out of loops, and address arithmetic that depends
1367 on floating point constants is unlikely to ever occur. */
1368 rtx set = single_set (inv->insn);
1369 if (set
1370 && IS_STACK_MODE (GET_MODE (SET_SRC (set)))
1371 && constant_pool_constant_p (SET_SRC (set)))
1372 {
1373 if (flag_ira_loop_pressure)
1374 regs_needed[ira_stack_reg_pressure_class] += 2;
1375 else
1376 regs_needed[0] += 2;
1377 }
1378 }
1379 #endif
1380
1381 EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, depno, bi)
1382 {
1383 bool check_p;
1384 enum reg_class dep_cl = ALL_REGS;
1385 int dep_ret;
1386
1387 dep = invariants[depno];
1388
1389 /* If DEP is moved out of the loop, it is no longer a dependency. */
1390 if (dep->move)
1391 continue;
1392
1393 dep_ret = get_inv_cost (dep, &acomp_cost, aregs_needed, &dep_cl);
1394
1395 if (! flag_ira_loop_pressure)
1396 check_p = aregs_needed[0] != 0;
1397 else
1398 {
1399 for (i = 0; i < ira_pressure_classes_num; i++)
1400 if (aregs_needed[ira_pressure_classes[i]] != 0)
1401 break;
1402 check_p = i < ira_pressure_classes_num;
1403
1404 if ((dep_ret == 1) || ((dep_ret == 0) && (*cl != dep_cl)))
1405 {
1406 *cl = ALL_REGS;
1407 ret = 1;
1408 }
1409 }
1410 if (check_p
1411 /* We need to check always_executed, since if the original value of
1412 the invariant may be preserved, we may need to keep it in a
1413 separate register. TODO check whether the register has a
1414 use outside of the loop. */
1415 && dep->always_executed
1416 && !dep->def->uses->next)
1417 {
1418 /* If this is a single use, after moving the dependency we will not
1419 need a new register. */
1420 if (! flag_ira_loop_pressure)
1421 aregs_needed[0]--;
1422 else
1423 {
1424 int nregs;
1425 enum reg_class pressure_class;
1426
1427 pressure_class = get_pressure_class_and_nregs (inv->insn, &nregs);
1428 aregs_needed[pressure_class] -= nregs;
1429 }
1430 }
1431
1432 if (! flag_ira_loop_pressure)
1433 regs_needed[0] += aregs_needed[0];
1434 else
1435 {
1436 for (i = 0; i < ira_pressure_classes_num; i++)
1437 regs_needed[ira_pressure_classes[i]]
1438 += aregs_needed[ira_pressure_classes[i]];
1439 }
1440 (*comp_cost) += acomp_cost;
1441 }
1442 return ret;
1443 }
1444
1445 /* Calculates gain for eliminating invariant INV. REGS_USED is the number
1446 of registers used in the loop, NEW_REGS is the number of new variables
1447 already added due to the invariant motion. The number of registers needed
1448 for it is stored in *REGS_NEEDED. SPEED and CALL_P are flags passed
1449 through to estimate_reg_pressure_cost. */
1450
1451 static int
1452 gain_for_invariant (struct invariant *inv, unsigned *regs_needed,
1453 unsigned *new_regs, unsigned regs_used,
1454 bool speed, bool call_p)
1455 {
1456 int comp_cost, size_cost;
1457 /* Work around a -Wmaybe-uninitialized false positive during
1458 profiledbootstrap by initializing it. */
1459 enum reg_class cl = NO_REGS;
1460 int ret;
1461
1462 actual_stamp++;
1463
1464 ret = get_inv_cost (inv, &comp_cost, regs_needed, &cl);
1465
1466 if (! flag_ira_loop_pressure)
1467 {
1468 size_cost = (estimate_reg_pressure_cost (new_regs[0] + regs_needed[0],
1469 regs_used, speed, call_p)
1470 - estimate_reg_pressure_cost (new_regs[0],
1471 regs_used, speed, call_p));
1472 }
1473 else if (ret < 0)
1474 return -1;
1475 else if ((ret == 0) && (cl == NO_REGS))
1476 /* Hoist it anyway since it does not impact register pressure. */
1477 return 1;
1478 else
1479 {
1480 int i;
1481 enum reg_class pressure_class;
1482
1483 for (i = 0; i < ira_pressure_classes_num; i++)
1484 {
1485 pressure_class = ira_pressure_classes[i];
1486
1487 if (!reg_classes_intersect_p (pressure_class, cl))
1488 continue;
1489
1490 if ((int) new_regs[pressure_class]
1491 + (int) regs_needed[pressure_class]
1492 + LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
1493 + IRA_LOOP_RESERVED_REGS
1494 > ira_class_hard_regs_num[pressure_class])
1495 break;
1496 }
1497 if (i < ira_pressure_classes_num)
1498 /* There will be register pressure excess and we do not want to
1499 perform this loop invariant motion. All loop invariants with
1500 non-positive gains will be rejected in function
1501 find_invariants_to_move. Therefore we return a negative
1502 number here.
1503
1504 One could think that this also rejects expensive loop
1505 invariant motions and will hurt code performance.
1506 However numerous experiments with different heuristics
1507 taking invariant cost into account did not confirm this
1508 assumption. There are possible explanations for this
1509 result:
1510 o probably all expensive invariants were already moved out
1511 of the loop by PRE and the gimple invariant motion pass.
1512 o expensive invariant execution will be hidden by insn
1513 scheduling or OOO processor hardware because usually such
1514 invariants have a lot of freedom to be executed
1515 out-of-order.
1516 Another reason for ignoring invariant-cost vs spill-cost
1517 heuristics is the difficulty of evaluating spill cost
1518 accurately at this stage. */
1519 return -1;
1520 else
1521 size_cost = 0;
1522 }
1523
1524 return comp_cost - size_cost;
1525 }
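
/* A small worked example: if recomputing the invariant inside the loop
   costs 8 (COMP_COST) and the register it would occupy raises the
   estimated pressure cost by 6 (SIZE_COST), the gain is 2 and the
   invariant is a candidate for moving; non-positive gains are rejected
   in find_invariants_to_move.  */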
1526
1527 /* Finds invariant with best gain for moving. Returns the gain, stores
1528 the invariant in *BEST and number of registers needed for it to
1529 *REGS_NEEDED. REGS_USED is the number of registers used in the loop.
1530 NEW_REGS is the number of new variables already added due to invariant
1531 motion. */
1532
1533 static int
1534 best_gain_for_invariant (struct invariant **best, unsigned *regs_needed,
1535 unsigned *new_regs, unsigned regs_used,
1536 bool speed, bool call_p)
1537 {
1538 struct invariant *inv;
1539 int i, gain = 0, again;
1540 unsigned aregs_needed[N_REG_CLASSES], invno;
1541
1542 FOR_EACH_VEC_ELT (invariants, invno, inv)
1543 {
1544 if (inv->move)
1545 continue;
1546
1547 /* Only consider the "representatives" of equivalent invariants. */
1548 if (inv->eqto != inv->invno)
1549 continue;
1550
1551 again = gain_for_invariant (inv, aregs_needed, new_regs, regs_used,
1552 speed, call_p);
1553 if (again > gain)
1554 {
1555 gain = again;
1556 *best = inv;
1557 if (! flag_ira_loop_pressure)
1558 regs_needed[0] = aregs_needed[0];
1559 else
1560 {
1561 for (i = 0; i < ira_pressure_classes_num; i++)
1562 regs_needed[ira_pressure_classes[i]]
1563 = aregs_needed[ira_pressure_classes[i]];
1564 }
1565 }
1566 }
1567
1568 return gain;
1569 }
1570
1571 /* Marks invariant INVNO and all its dependencies for moving. */
1572
1573 static void
1574 set_move_mark (unsigned invno, int gain)
1575 {
1576 struct invariant *inv = invariants[invno];
1577 bitmap_iterator bi;
1578
1579 /* Find the representative of the class of the equivalent invariants. */
1580 inv = invariants[inv->eqto];
1581
1582 if (inv->move)
1583 return;
1584 inv->move = true;
1585
1586 if (dump_file)
1587 {
1588 if (gain >= 0)
1589 fprintf (dump_file, "Decided to move invariant %d -- gain %d\n",
1590 invno, gain);
1591 else
1592 fprintf (dump_file, "Decided to move dependent invariant %d\n",
1593 invno);
1594 }
1595
1596 EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, invno, bi)
1597 {
1598 set_move_mark (invno, -1);
1599 }
1600 }
1601
1602 /* Determines which invariants to move. */
1603
1604 static void
1605 find_invariants_to_move (bool speed, bool call_p)
1606 {
1607 int gain;
1608 unsigned i, regs_used, regs_needed[N_REG_CLASSES], new_regs[N_REG_CLASSES];
1609 struct invariant *inv = NULL;
1610
1611 if (!invariants.length ())
1612 return;
1613
1614 if (flag_ira_loop_pressure)
1615 /* REGS_USED is actually never used when the flag is on. */
1616 regs_used = 0;
1617 else
1618 /* We do not really do a good job in estimating the number of
1619 registers used; we put some initial bound here to stand for
1620 induction variables etc. that we do not detect. */
1621 {
1622 unsigned int n_regs = DF_REG_SIZE (df);
1623
1624 regs_used = 2;
1625
1626 for (i = 0; i < n_regs; i++)
1627 {
1628 if (!DF_REGNO_FIRST_DEF (i) && DF_REGNO_LAST_USE (i))
1629 {
1630 /* This is a value that is used but not changed inside loop. */
1631 regs_used++;
1632 }
1633 }
1634 }
1635
1636 if (! flag_ira_loop_pressure)
1637 new_regs[0] = regs_needed[0] = 0;
1638 else
1639 {
1640 for (i = 0; (int) i < ira_pressure_classes_num; i++)
1641 new_regs[ira_pressure_classes[i]] = 0;
1642 }
1643 while ((gain = best_gain_for_invariant (&inv, regs_needed,
1644 new_regs, regs_used,
1645 speed, call_p)) > 0)
1646 {
1647 set_move_mark (inv->invno, gain);
1648 if (! flag_ira_loop_pressure)
1649 new_regs[0] += regs_needed[0];
1650 else
1651 {
1652 for (i = 0; (int) i < ira_pressure_classes_num; i++)
1653 new_regs[ira_pressure_classes[i]]
1654 += regs_needed[ira_pressure_classes[i]];
1655 }
1656 }
1657 }
1658
1659 /* Replace the uses reached by the definition of invariant INV with REG.
1660
1661 IN_GROUP is nonzero if this is part of a group of changes that must be
1662 performed as a group. In that case, the changes will be stored. The
1663 function `apply_change_group' will validate and apply the changes. */
1664
1665 static int
1666 replace_uses (struct invariant *inv, rtx reg, bool in_group)
1667 {
1668 /* Replace the uses we know to be dominated. It saves work for copy
1669 propagation, and also it is necessary so that dependent invariants
1670 are computed correctly. */
1671 if (inv->def)
1672 {
1673 struct use *use;
1674 for (use = inv->def->uses; use; use = use->next)
1675 validate_change (use->insn, use->pos, reg, true);
1676
1677 /* If we aren't part of a larger group, apply the changes now. */
1678 if (!in_group)
1679 return apply_change_group ();
1680 }
1681
1682 return 1;
1683 }
1684
1685 /* Whether invariant INV setting REG can be moved out of LOOP, at the end of
1686 the block preceding its header. */
1687
1688 static bool
1689 can_move_invariant_reg (struct loop *loop, struct invariant *inv, rtx reg)
1690 {
1691 df_ref def, use;
1692 unsigned int dest_regno, defs_in_loop_count = 0;
1693 rtx_insn *insn = inv->insn;
1694 basic_block bb = BLOCK_FOR_INSN (inv->insn);
1695
1696 /* We ignore hard registers and memory accesses for cost and complexity
1697 reasons. Hard registers are few at this stage and expensive to consider
1698 as they require building a separate data flow. Memory accesses would
1699 require using the df_simulate_* and can_move_insns_across functions and are more complex. */
1700 if (!REG_P (reg) || HARD_REGISTER_P (reg))
1701 return false;
1702
1703 /* Check whether the set is always executed. We could omit this condition if
1704 we know that the register is unused outside of the loop, but it does not
1705 seem worth finding out. */
1706 if (!inv->always_executed)
1707 return false;
1708
1709 /* Check that all uses that would be dominated by def are already dominated
1710 by it. */
1711 dest_regno = REGNO (reg);
1712 for (use = DF_REG_USE_CHAIN (dest_regno); use; use = DF_REF_NEXT_REG (use))
1713 {
1714 rtx_insn *use_insn;
1715 basic_block use_bb;
1716
1717 use_insn = DF_REF_INSN (use);
1718 use_bb = BLOCK_FOR_INSN (use_insn);
1719
1720 /* Ignore the instruction being considered for moving. */
1721 if (use_insn == insn)
1722 continue;
1723
1724 /* Don't consider uses outside the loop. */
1725 if (!flow_bb_inside_loop_p (loop, use_bb))
1726 continue;
1727
1728 /* Don't move if a use is not dominated by def in insn. */
1729 if (use_bb == bb && DF_INSN_LUID (insn) >= DF_INSN_LUID (use_insn))
1730 return false;
1731 if (!dominated_by_p (CDI_DOMINATORS, use_bb, bb))
1732 return false;
1733 }
1734
1735 /* Check for other defs. Any other def in the loop might reach a use
1736 currently reached by the def in insn. */
1737 for (def = DF_REG_DEF_CHAIN (dest_regno); def; def = DF_REF_NEXT_REG (def))
1738 {
1739 basic_block def_bb = DF_REF_BB (def);
1740
1741 /* A def in an exit block cannot reach a use it wasn't already reaching. */
1742 if (single_succ_p (def_bb))
1743 {
1744 basic_block def_bb_succ;
1745
1746 def_bb_succ = single_succ (def_bb);
1747 if (!flow_bb_inside_loop_p (loop, def_bb_succ))
1748 continue;
1749 }
1750
1751 if (++defs_in_loop_count > 1)
1752 return false;
1753 }
1754
1755 return true;
1756 }
1757
1758 /* Move invariant INVNO out of the LOOP. Returns true if this succeeds, false
1759 otherwise. */
1760
1761 static bool
1762 move_invariant_reg (struct loop *loop, unsigned invno)
1763 {
1764 struct invariant *inv = invariants[invno];
1765 struct invariant *repr = invariants[inv->eqto];
1766 unsigned i;
1767 basic_block preheader = loop_preheader_edge (loop)->src;
1768 rtx reg, set, dest, note;
1769 bitmap_iterator bi;
1770 int regno = -1;
1771
1772 if (inv->reg)
1773 return true;
1774 if (!repr->move)
1775 return false;
1776
1777 /* If this is a representative of the class of equivalent invariants,
1778 really move the invariant. Otherwise just replace its use with
1779 the register used for the representative. */
1780 if (inv == repr)
1781 {
1782 if (inv->depends_on)
1783 {
1784 EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, i, bi)
1785 {
1786 if (!move_invariant_reg (loop, i))
1787 goto fail;
1788 }
1789 }
1790
1791 /* If possible, just move the set out of the loop. Otherwise, we
1792 need to create a temporary register. */
1793 set = single_set (inv->insn);
1794 reg = dest = SET_DEST (set);
1795 if (GET_CODE (reg) == SUBREG)
1796 reg = SUBREG_REG (reg);
1797 if (REG_P (reg))
1798 regno = REGNO (reg);
1799
1800 if (!can_move_invariant_reg (loop, inv, dest))
1801 {
1802 reg = gen_reg_rtx_and_attrs (dest);
1803
1804 /* Try replacing the destination by a new pseudoregister. */
1805 validate_change (inv->insn, &SET_DEST (set), reg, true);
1806
1807 /* As well as all the dominated uses. */
1808 replace_uses (inv, reg, true);
1809
1810 /* And validate all the changes. */
1811 if (!apply_change_group ())
1812 goto fail;
1813
1814 emit_insn_after (gen_move_insn (dest, reg), inv->insn);
1815 }
1816 else if (dump_file)
1817 fprintf (dump_file, "Invariant %d moved without introducing a new "
1818 "temporary register\n", invno);
1819 reorder_insns (inv->insn, inv->insn, BB_END (preheader));
1820 df_recompute_luids (preheader);
1821
1822 /* If there is a REG_EQUAL note on the insn we just moved, and the
1823 insn is in a basic block that is not always executed or the note
1824 contains something for which we don't know the invariant status,
1825 the note may no longer be valid after we move the insn. Note that
1826 uses in REG_EQUAL notes are taken into account in the computation
1827 of invariants, so it is safe to retain the note even if it contains
1828 register references for which we know the invariant status. */
1829 if ((note = find_reg_note (inv->insn, REG_EQUAL, NULL_RTX))
1830 && (!inv->always_executed
1831 || !check_maybe_invariant (XEXP (note, 0))))
1832 remove_note (inv->insn, note);
1833 }
1834 else
1835 {
1836 if (!move_invariant_reg (loop, repr->invno))
1837 goto fail;
1838 reg = repr->reg;
1839 regno = repr->orig_regno;
1840 if (!replace_uses (inv, reg, false))
1841 goto fail;
1842 set = single_set (inv->insn);
1843 emit_insn_after (gen_move_insn (SET_DEST (set), reg), inv->insn);
1844 delete_insn (inv->insn);
1845 }
1846
1847 inv->reg = reg;
1848 inv->orig_regno = regno;
1849
1850 return true;
1851
1852 fail:
1853 /* If we failed, clear the move flag, so that we do not try to move inv
1854 again. */
1855 if (dump_file)
1856 fprintf (dump_file, "Failed to move invariant %d\n", invno);
1857 inv->move = false;
1858 inv->reg = NULL_RTX;
1859 inv->orig_regno = -1;
1860
1861 return false;
1862 }
1863
1864 /* Move the selected invariants out of the LOOP. With -fira-loop-pressure,
1865 register classes are also set up for the newly created pseudos. */
1866
1867 static void
1868 move_invariants (struct loop *loop)
1869 {
1870 struct invariant *inv;
1871 unsigned i;
1872
1873 FOR_EACH_VEC_ELT (invariants, i, inv)
1874 move_invariant_reg (loop, i);
1875 if (flag_ira_loop_pressure && resize_reg_info ())
1876 {
1877 FOR_EACH_VEC_ELT (invariants, i, inv)
1878 if (inv->reg != NULL_RTX)
1879 {
1880 if (inv->orig_regno >= 0)
1881 setup_reg_classes (REGNO (inv->reg),
1882 reg_preferred_class (inv->orig_regno),
1883 reg_alternate_class (inv->orig_regno),
1884 reg_allocno_class (inv->orig_regno));
1885 else
1886 setup_reg_classes (REGNO (inv->reg),
1887 GENERAL_REGS, NO_REGS, GENERAL_REGS);
1888 }
1889 }
1890 /* Remove the DF_UD_CHAIN problem added in find_defs before rescanning,
1891 to save a bit of compile time. */
1892 df_remove_problem (df_chain);
1893 df_process_deferred_rescans ();
1894 }
1895
1896 /* Initializes invariant motion data. */
1897
1898 static void
1899 init_inv_motion_data (void)
1900 {
1901 actual_stamp = 1;
1902
1903 invariants.create (100);
1904 }
1905
1906 /* Frees the data allocated by invariant motion. */
1907
1908 static void
1909 free_inv_motion_data (void)
1910 {
1911 unsigned i;
1912 struct def *def;
1913 struct invariant *inv;
1914
1915 check_invariant_table_size ();
1916 for (i = 0; i < DF_DEFS_TABLE_SIZE (); i++)
1917 {
1918 inv = invariant_table[i];
1919 if (inv)
1920 {
1921 def = inv->def;
1922 gcc_assert (def != NULL);
1923
1924 free_use_list (def->uses);
1925 free (def);
1926 invariant_table[i] = NULL;
1927 }
1928 }
1929
1930 FOR_EACH_VEC_ELT (invariants, i, inv)
1931 {
1932 BITMAP_FREE (inv->depends_on);
1933 free (inv);
1934 }
1935 invariants.release ();
1936 }
1937
1938 /* Move the invariants out of the LOOP. */
1939
1940 static void
1941 move_single_loop_invariants (struct loop *loop)
1942 {
1943 init_inv_motion_data ();
1944
1945 find_invariants (loop);
1946 find_invariants_to_move (optimize_loop_for_speed_p (loop),
1947 LOOP_DATA (loop)->has_call);
1948 move_invariants (loop);
1949
1950 free_inv_motion_data ();
1951 }
1952
1953 /* Releases the auxiliary data for LOOP. */
1954
1955 static void
1956 free_loop_data (struct loop *loop)
1957 {
1958 struct loop_data *data = LOOP_DATA (loop);
1959 if (!data)
1960 return;
1961
1962 bitmap_clear (&LOOP_DATA (loop)->regs_ref);
1963 bitmap_clear (&LOOP_DATA (loop)->regs_live);
1964 free (data);
1965 loop->aux = NULL;
1966 }
1967
1968 \f
1969
1970 /* Registers currently living. */
1971 static bitmap_head curr_regs_live;
1972
1973 /* Current reg pressure for each pressure class. */
1974 static int curr_reg_pressure[N_REG_CLASSES];
1975
1976 /* Record all regs that are set in any one insn.  Communication between
1977 mark_reg_{store,clobber} and calculate_loop_reg_pressure.  An asm
1978 statement can refer to all hard registers. */
1979 static rtx regs_set[(FIRST_PSEUDO_REGISTER > MAX_RECOG_OPERANDS
1980 ? FIRST_PSEUDO_REGISTER : MAX_RECOG_OPERANDS) * 2];
1981 /* Number of regs stored in the previous array. */
1982 static int n_regs_set;
1983
1984 /* Return pressure class and number of needed hard registers (through
1985 *NREGS) of register REGNO. */
1986 static enum reg_class
1987 get_regno_pressure_class (int regno, int *nregs)
1988 {
1989 if (regno >= FIRST_PSEUDO_REGISTER)
1990 {
1991 enum reg_class pressure_class;
1992
1993 pressure_class = reg_allocno_class (regno);
1994 pressure_class = ira_pressure_class_translate[pressure_class];
1995 *nregs
1996 = ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
1997 return pressure_class;
1998 }
1999 else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
2000 && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
2001 {
2002 *nregs = 1;
2003 return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
2004 }
2005 else
2006 {
2007 *nregs = 0;
2008 return NO_REGS;
2009 }
2010 }
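/* A worked example of the function above: on a target where word_mode is
   SImode, a pseudo of mode DImode whose allocno class translates to
   GENERAL_REGS yields *NREGS = 2 and pressure class GENERAL_REGS, because
   the value needs two word-sized hard registers.  An allocatable hard
   register always counts as *NREGS = 1 in its own translated class, while
   fixed or eliminable hard registers (e.g. the frame pointer when it can
   be eliminated) contribute nothing.  */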
2011
2012 /* Increase (if INCR_P) or decrease current register pressure for
2013 register REGNO. */
2014 static void
2015 change_pressure (int regno, bool incr_p)
2016 {
2017 int nregs;
2018 enum reg_class pressure_class;
2019
2020 pressure_class = get_regno_pressure_class (regno, &nregs);
2021 if (! incr_p)
2022 curr_reg_pressure[pressure_class] -= nregs;
2023 else
2024 {
2025 curr_reg_pressure[pressure_class] += nregs;
2026 if (LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
2027 < curr_reg_pressure[pressure_class])
2028 LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
2029 = curr_reg_pressure[pressure_class];
2030 }
2031 }
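/* Continuing the example: if the current GENERAL_REGS pressure is 2 and a
   DImode pseudo (two words on this hypothetical 32-bit target) becomes
   live, curr_reg_pressure rises to 4, and the loop's max_reg_pressure is
   bumped to 4 if it was lower.  Only increases can raise the recorded
   maximum; decreases never shrink it.  */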
2032
2033 /* Mark REGNO birth. */
2034 static void
2035 mark_regno_live (int regno)
2036 {
2037 struct loop *loop;
2038
2039 for (loop = curr_loop;
2040 loop != current_loops->tree_root;
2041 loop = loop_outer (loop))
2042 bitmap_set_bit (&LOOP_DATA (loop)->regs_live, regno);
2043 if (!bitmap_set_bit (&curr_regs_live, regno))
2044 return;
2045 change_pressure (regno, true);
2046 }
2047
2048 /* Mark REGNO death. */
2049 static void
2050 mark_regno_death (int regno)
2051 {
2052 if (! bitmap_clear_bit (&curr_regs_live, regno))
2053 return;
2054 change_pressure (regno, false);
2055 }
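/* Note that both functions above are idempotent per regno: bitmap_set_bit
   returns false if the bit was already set, and bitmap_clear_bit returns
   false if it was already clear, so the pressure is adjusted only on an
   actual live/dead transition even if the same regno is marked twice.  */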
2056
2057 /* Mark setting register REG. */
2058 static void
2059 mark_reg_store (rtx reg, const_rtx setter ATTRIBUTE_UNUSED,
2060 void *data ATTRIBUTE_UNUSED)
2061 {
2062 if (GET_CODE (reg) == SUBREG)
2063 reg = SUBREG_REG (reg);
2064
2065 if (! REG_P (reg))
2066 return;
2067
2068 regs_set[n_regs_set++] = reg;
2069
2070 unsigned int end_regno = END_REGNO (reg);
2071 for (unsigned int regno = REGNO (reg); regno < end_regno; ++regno)
2072 mark_regno_live (regno);
2073 }
2074
2075 /* Mark clobbering register REG. */
2076 static void
2077 mark_reg_clobber (rtx reg, const_rtx setter, void *data)
2078 {
2079 if (GET_CODE (setter) == CLOBBER)
2080 mark_reg_store (reg, setter, data);
2081 }
2082
2083 /* Mark register REG death. */
2084 static void
2085 mark_reg_death (rtx reg)
2086 {
2087 unsigned int end_regno = END_REGNO (reg);
2088 for (unsigned int regno = REGNO (reg); regno < end_regno; ++regno)
2089 mark_regno_death (regno);
2090 }
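/* A multi-word value occupying a pair of hard registers is handled one
   regno at a time by the END_REGNO loops above: e.g. a DImode value held
   in hard registers 0 and 1 triggers mark_regno_live (or mark_regno_death)
   for both regno 0 and regno 1, so the pressure bookkeeping sees two
   single-register events rather than one double-register one.  */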
2091
2092 /* Mark occurrence of registers in X for the current loop. */
2093 static void
2094 mark_ref_regs (rtx x)
2095 {
2096 RTX_CODE code;
2097 int i;
2098 const char *fmt;
2099
2100 if (!x)
2101 return;
2102
2103 code = GET_CODE (x);
2104 if (code == REG)
2105 {
2106 struct loop *loop;
2107
2108 for (loop = curr_loop;
2109 loop != current_loops->tree_root;
2110 loop = loop_outer (loop))
2111 bitmap_set_bit (&LOOP_DATA (loop)->regs_ref, REGNO (x));
2112 return;
2113 }
2114
2115 fmt = GET_RTX_FORMAT (code);
2116 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2117 if (fmt[i] == 'e')
2118 mark_ref_regs (XEXP (x, i));
2119 else if (fmt[i] == 'E')
2120 {
2121 int j;
2122
2123 for (j = 0; j < XVECLEN (x, i); j++)
2124 mark_ref_regs (XVECEXP (x, i, j));
2125 }
2126 }
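/* As an example of the traversal above: for X = (plus:SI (reg:SI 90)
   (mem:SI (reg:SI 91))), the format string of PLUS is "ee", so both
   operands are visited; (reg 90) is recorded directly, and the MEM
   (format "e") is entered to reach (reg 91).  Every register referenced
   anywhere in the pattern thus ends up in regs_ref of the current loop
   and of all enclosing loops.  */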
2127
2128 /* Calculate register pressure in the loops. */
2129 static void
2130 calculate_loop_reg_pressure (void)
2131 {
2132 int i;
2133 unsigned int j;
2134 bitmap_iterator bi;
2135 basic_block bb;
2136 rtx_insn *insn;
2137 rtx link;
2138 struct loop *loop, *parent;
2139
2140 FOR_EACH_LOOP (loop, 0)
2141 if (loop->aux == NULL)
2142 {
2143 loop->aux = xcalloc (1, sizeof (struct loop_data));
2144 bitmap_initialize (&LOOP_DATA (loop)->regs_ref, &reg_obstack);
2145 bitmap_initialize (&LOOP_DATA (loop)->regs_live, &reg_obstack);
2146 }
2147 ira_setup_eliminable_regset ();
2148 bitmap_initialize (&curr_regs_live, &reg_obstack);
2149 FOR_EACH_BB_FN (bb, cfun)
2150 {
2151 curr_loop = bb->loop_father;
2152 if (curr_loop == current_loops->tree_root)
2153 continue;
2154
2155 for (loop = curr_loop;
2156 loop != current_loops->tree_root;
2157 loop = loop_outer (loop))
2158 bitmap_ior_into (&LOOP_DATA (loop)->regs_live, DF_LR_IN (bb));
2159
2160 bitmap_copy (&curr_regs_live, DF_LR_IN (bb));
2161 for (i = 0; i < ira_pressure_classes_num; i++)
2162 curr_reg_pressure[ira_pressure_classes[i]] = 0;
2163 EXECUTE_IF_SET_IN_BITMAP (&curr_regs_live, 0, j, bi)
2164 change_pressure (j, true);
2165
2166 FOR_BB_INSNS (bb, insn)
2167 {
2168 if (! NONDEBUG_INSN_P (insn))
2169 continue;
2170
2171 mark_ref_regs (PATTERN (insn));
2172 n_regs_set = 0;
2173 note_stores (PATTERN (insn), mark_reg_clobber, NULL);
2174
2175 /* Mark any registers dead after INSN as dead now. */
2176
2177 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2178 if (REG_NOTE_KIND (link) == REG_DEAD)
2179 mark_reg_death (XEXP (link, 0));
2180
2181 /* Mark any registers set in INSN as live,
2182 and mark them as conflicting with all other live regs.
2183 Clobbers are processed again, so they conflict with
2184 the registers that are set. */
2185
2186 note_stores (PATTERN (insn), mark_reg_store, NULL);
2187
2188 if (AUTO_INC_DEC)
2189 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2190 if (REG_NOTE_KIND (link) == REG_INC)
2191 mark_reg_store (XEXP (link, 0), NULL_RTX, NULL);
2192
2193 while (n_regs_set-- > 0)
2194 {
2195 rtx note = find_regno_note (insn, REG_UNUSED,
2196 REGNO (regs_set[n_regs_set]));
2197 if (! note)
2198 continue;
2199
2200 mark_reg_death (XEXP (note, 0));
2201 }
2202 }
2203 }
2204 bitmap_release (&curr_regs_live);
2205 if (flag_ira_region == IRA_REGION_MIXED
2206 || flag_ira_region == IRA_REGION_ALL)
2207 FOR_EACH_LOOP (loop, 0)
2208 {
2209 EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop)->regs_live, 0, j, bi)
2210 if (! bitmap_bit_p (&LOOP_DATA (loop)->regs_ref, j))
2211 {
2212 enum reg_class pressure_class;
2213 int nregs;
2214
2215 pressure_class = get_regno_pressure_class (j, &nregs);
2216 LOOP_DATA (loop)->max_reg_pressure[pressure_class] -= nregs;
2217 }
2218 }
2219 if (dump_file == NULL)
2220 return;
2221 FOR_EACH_LOOP (loop, 0)
2222 {
2223 parent = loop_outer (loop);
2224 fprintf (dump_file, "\n Loop %d (parent %d, header bb%d, depth %d)\n",
2225 loop->num, (parent == NULL ? -1 : parent->num),
2226 loop->header->index, loop_depth (loop));
2227 fprintf (dump_file, "\n ref. regnos:");
2228 EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop)->regs_ref, 0, j, bi)
2229 fprintf (dump_file, " %d", j);
2230 fprintf (dump_file, "\n live regnos:");
2231 EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop)->regs_live, 0, j, bi)
2232 fprintf (dump_file, " %d", j);
2233 fprintf (dump_file, "\n Pressure:");
2234 for (i = 0; i < ira_pressure_classes_num; i++)
2235 {
2236 enum reg_class pressure_class;
2237
2238 pressure_class = ira_pressure_classes[i];
2239 if (LOOP_DATA (loop)->max_reg_pressure[pressure_class] == 0)
2240 continue;
2241 fprintf (dump_file, " %s=%d", reg_class_names[pressure_class],
2242 LOOP_DATA (loop)->max_reg_pressure[pressure_class]);
2243 }
2244 fprintf (dump_file, "\n");
2245 }
2246 }
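/* With -fira-loop-pressure and dumping enabled, the per-loop summary
   printed above might look like this (the regnos and class names are
   illustrative):

     Loop 2 (parent 1, header bb4, depth 2)
     ref. regnos: 100 105
     live regnos: 100 105 110
     Pressure: GENERAL_REGS=3

   i.e. loop 2 references pseudos 100 and 105, pseudo 110 is merely live
   across it, and the peak demand on GENERAL_REGS inside it is three hard
   registers.  */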
2247
2248 \f
2249
2250 /* Move the invariants out of the loops. */
2251
2252 void
2253 move_loop_invariants (void)
2254 {
2255 struct loop *loop;
2256
2257 if (optimize == 1)
2258 df_live_add_problem ();
2259 /* ??? This is a hack. We should only need to call df_live_set_all_dirty
2260 for optimize == 1, but can_move_invariant_reg relies on DF_INSN_LUID
2261 being up-to-date. That isn't always true (even after df_analyze)
2262 because df_process_deferred_rescans doesn't necessarily cause
2263 blocks to be rescanned. */
2264 df_live_set_all_dirty ();
2265 if (flag_ira_loop_pressure)
2266 {
2267 df_analyze ();
2268 regstat_init_n_sets_and_refs ();
2269 ira_set_pseudo_classes (true, dump_file);
2270 calculate_loop_reg_pressure ();
2271 regstat_free_n_sets_and_refs ();
2272 }
2273 df_set_flags (DF_EQ_NOTES + DF_DEFER_INSN_RESCAN);
2274 /* Process the loops, innermost first. */
2275 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
2276 {
2277 curr_loop = loop;
2278 /* move_single_loop_invariants for very large loops is time-consuming
2279 and might need a lot of memory. */
2280 if (loop->num_nodes <= (unsigned) LOOP_INVARIANT_MAX_BBS_IN_LOOP)
2281 move_single_loop_invariants (loop);
2282 }
2283
2284 FOR_EACH_LOOP (loop, 0)
2285 {
2286 free_loop_data (loop);
2287 }
2288
2289 if (flag_ira_loop_pressure)
2290 /* There is no point in keeping this info because it will most
2291 probably be outdated by subsequent passes. */
2292 free_reg_info ();
2293 free (invariant_table);
2294 invariant_table = NULL;
2295 invariant_table_size = 0;
2296
2297 if (optimize == 1)
2298 df_remove_problem (df_live);
2299
2300 checking_verify_flow_info ();
2301 }
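/* At the source level, the net effect of this pass on a typical candidate
   is to rewrite something like

     for (int i = 0; i < n; i++)
       a[off + i] = 0;

   as if the invariant address base had been computed by hand:

     int *p = &a[off];
     for (int i = 0; i < n; i++)
       p[i] = 0;

   (an illustrative sketch only; the pass works on RTL address arithmetic,
   the names above are invented, and the pressure and cost heuristics must
   first decide that a register can be spared for the hoisted value).  */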