gcc/loop-invariant.c
1 /* RTL-level loop invariant motion.
2 Copyright (C) 2004-2021 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This implements the loop invariant motion pass. It is very simple
21 (no calls, no loads/stores, etc.). This should be sufficient to clean up
22 things like address arithmetic -- other, more complicated invariants should
23 be eliminated on GIMPLE either in tree-ssa-loop-im.c or in tree-ssa-pre.c.
24
25 We proceed loop by loop -- it is simpler than trying to handle things
26 globally and should not lose much. First we inspect all sets inside loop
27 and create a dependency graph on insns (saying "to move this insn, you must
28 also move the following insns").
29
30 We then need to determine what to move. We estimate the number of registers
31 used and move as many invariants as possible while we still have enough free
32 registers. We prefer the expensive invariants.
33
34 Then we move the selected invariants out of the loop, creating new
35 temporaries for them if necessary. */
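
/* As an illustrative C-level sketch of the kind of transformation this
 pass performs (hedged: the pass works on RTL, and the exact shape of
 the hoisted expression depends on the target), given

 for (i = 0; i < n; i++)
 a[i + k] = 0;

 the sub-address "a + k*4" (assuming 4-byte elements) is loop invariant,
 so its computation is moved into a new pseudo register in the loop
 preheader:

 tmp = (char *) a + k * 4;
 for (i = 0; i < n; i++)
 ((int *) tmp)[i] = 0;

 Only the induction-variable part of the address remains in the loop. */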
36
37 #include "config.h"
38 #include "system.h"
39 #include "coretypes.h"
40 #include "backend.h"
41 #include "target.h"
42 #include "rtl.h"
43 #include "tree.h"
44 #include "cfghooks.h"
45 #include "df.h"
46 #include "memmodel.h"
47 #include "tm_p.h"
48 #include "insn-config.h"
49 #include "regs.h"
50 #include "ira.h"
51 #include "recog.h"
52 #include "cfgrtl.h"
53 #include "cfgloop.h"
54 #include "expr.h"
55 #include "rtl-iter.h"
56 #include "dumpfile.h"
57
58 /* The data stored for the loop. */
59
60 class loop_data
61 {
62 public:
63 class loop *outermost_exit; /* The outermost exit of the loop. */
64 bool has_call; /* True if the loop contains a call. */
65 /* Maximal register pressure inside loop for given register class
66 (defined only for the pressure classes). */
67 int max_reg_pressure[N_REG_CLASSES];
68 /* Loop regs referenced and live pseudo-registers. */
69 bitmap_head regs_ref;
70 bitmap_head regs_live;
71 };
72
73 #define LOOP_DATA(LOOP) ((class loop_data *) (LOOP)->aux)
74
75 /* The description of a use. */
76
77 struct use
78 {
79 rtx *pos; /* Position of the use. */
80 rtx_insn *insn; /* The insn in which the use occurs. */
81 unsigned addr_use_p; /* Whether the use occurs in an address. */
82 struct use *next; /* Next use in the list. */
83 };
84
85 /* The description of a def. */
86
87 struct def
88 {
89 struct use *uses; /* The list of uses that are uniquely reached
90 by it. */
91 unsigned n_uses; /* Number of such uses. */
92 unsigned n_addr_uses; /* Number of uses in addresses. */
93 unsigned invno; /* The corresponding invariant. */
94 bool can_prop_to_addr_uses; /* True if the corresponding inv can be
95 propagated into its address uses. */
96 };
97
98 /* The data stored for each invariant. */
99
100 struct invariant
101 {
102 /* The number of the invariant. */
103 unsigned invno;
104
105 /* The number of the invariant with the same value. */
106 unsigned eqto;
107
108 /* The number of invariants which eqto this. */
109 unsigned eqno;
110
111 /* If we moved the invariant out of the loop, the original regno
112 that contained its value. */
113 int orig_regno;
114
115 /* If we moved the invariant out of the loop, the register that contains its
116 value. */
117 rtx reg;
118
119 /* The definition of the invariant. */
120 struct def *def;
121
122 /* The insn in which it is defined. */
123 rtx_insn *insn;
124
125 /* Whether it is always executed. */
126 bool always_executed;
127
128 /* Whether to move the invariant. */
129 bool move;
130
131 /* Whether the invariant is cheap when used as an address. */
132 bool cheap_address;
133
134 /* Cost of the invariant. */
135 unsigned cost;
136
137 /* Used for detecting already visited invariants while determining
138 the cost of movements. */
139 unsigned stamp;
140
141 /* The invariants it depends on. */
142 bitmap depends_on;
143 };
144
145 /* Currently processed loop. */
146 static class loop *curr_loop;
147
148 /* Table of invariants indexed by the df_ref uid field. */
149
150 static unsigned int invariant_table_size = 0;
151 static struct invariant ** invariant_table;
152
153 /* Entry for hash table of invariant expressions. */
154
155 struct invariant_expr_entry
156 {
157 /* The invariant. */
158 struct invariant *inv;
159
160 /* Its value. */
161 rtx expr;
162
163 /* Its mode. */
164 machine_mode mode;
165
166 /* Its hash. */
167 hashval_t hash;
168 };
169
170 /* The actual stamp used for marking invariants already visited while
171 determining the cost of movements. */
172
173 static unsigned actual_stamp;
174
175 typedef struct invariant *invariant_p;
176
177
178 /* The invariants. */
179
180 static vec<invariant_p> invariants;
181
182 /* Check the size of the invariant table and realloc if necessary. */
183
184 static void
185 check_invariant_table_size (void)
186 {
187 if (invariant_table_size < DF_DEFS_TABLE_SIZE ())
188 {
189 unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
190 invariant_table = XRESIZEVEC (struct invariant *, invariant_table, new_size);
191 memset (&invariant_table[invariant_table_size], 0,
192 (new_size - invariant_table_size) * sizeof (struct invariant *));
193 invariant_table_size = new_size;
194 }
195 }
196
197 /* Test whether it is possible for X to be invariant. */
198
199 static bool
200 check_maybe_invariant (rtx x)
201 {
202 enum rtx_code code = GET_CODE (x);
203 int i, j;
204 const char *fmt;
205
206 switch (code)
207 {
208 CASE_CONST_ANY:
209 case SYMBOL_REF:
210 case CONST:
211 case LABEL_REF:
212 return true;
213
214 case PC:
215 case UNSPEC_VOLATILE:
216 case CALL:
217 return false;
218
219 case REG:
220 return true;
221
222 case MEM:
223 /* Load/store motion is done elsewhere. ??? Perhaps also add it here?
224 It should not be hard, and might be faster than "elsewhere". */
225
226 /* Just handle the most trivial case where we load from an unchanging
227 location (most importantly, pic tables). */
228 if (MEM_READONLY_P (x) && !MEM_VOLATILE_P (x))
229 break;
230
231 return false;
232
233 case ASM_OPERANDS:
234 /* Don't mess with insns declared volatile. */
235 if (MEM_VOLATILE_P (x))
236 return false;
237 break;
238
239 default:
240 break;
241 }
242
243 fmt = GET_RTX_FORMAT (code);
244 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
245 {
246 if (fmt[i] == 'e')
247 {
248 if (!check_maybe_invariant (XEXP (x, i)))
249 return false;
250 }
251 else if (fmt[i] == 'E')
252 {
253 for (j = 0; j < XVECLEN (x, i); j++)
254 if (!check_maybe_invariant (XVECEXP (x, i, j)))
255 return false;
256 }
257 }
258
259 return true;
260 }
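
/* For example (an illustrative summary of the cases handled above):
 (plus (reg R) (const_int 4)) may be invariant; (mem (reg R)) is
 rejected unless MEM_READONLY_P and !MEM_VOLATILE_P hold, e.g. a load
 from a PIC table; and anything containing PC, CALL or UNSPEC_VOLATILE
 is never invariant. */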
261
262 /* Returns the invariant definition for USE, or NULL if USE is not
263 invariant. */
264
265 static struct invariant *
266 invariant_for_use (df_ref use)
267 {
268 struct df_link *defs;
269 df_ref def;
270 basic_block bb = DF_REF_BB (use), def_bb;
271
272 if (DF_REF_FLAGS (use) & DF_REF_READ_WRITE)
273 return NULL;
274
275 defs = DF_REF_CHAIN (use);
276 if (!defs || defs->next)
277 return NULL;
278 def = defs->ref;
279 check_invariant_table_size ();
280 if (!invariant_table[DF_REF_ID (def)])
281 return NULL;
282
283 def_bb = DF_REF_BB (def);
284 if (!dominated_by_p (CDI_DOMINATORS, bb, def_bb))
285 return NULL;
286 return invariant_table[DF_REF_ID (def)];
287 }
288
289 /* Computes hash value for invariant expression X in INSN. */
290
291 static hashval_t
292 hash_invariant_expr_1 (rtx_insn *insn, rtx x)
293 {
294 enum rtx_code code = GET_CODE (x);
295 int i, j;
296 const char *fmt;
297 hashval_t val = code;
298 int do_not_record_p;
299 df_ref use;
300 struct invariant *inv;
301
302 switch (code)
303 {
304 CASE_CONST_ANY:
305 case SYMBOL_REF:
306 case CONST:
307 case LABEL_REF:
308 return hash_rtx (x, GET_MODE (x), &do_not_record_p, NULL, false);
309
310 case REG:
311 use = df_find_use (insn, x);
312 if (!use)
313 return hash_rtx (x, GET_MODE (x), &do_not_record_p, NULL, false);
314 inv = invariant_for_use (use);
315 if (!inv)
316 return hash_rtx (x, GET_MODE (x), &do_not_record_p, NULL, false);
317
318 gcc_assert (inv->eqto != ~0u);
319 return inv->eqto;
320
321 default:
322 break;
323 }
324
325 fmt = GET_RTX_FORMAT (code);
326 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
327 {
328 if (fmt[i] == 'e')
329 val ^= hash_invariant_expr_1 (insn, XEXP (x, i));
330 else if (fmt[i] == 'E')
331 {
332 for (j = 0; j < XVECLEN (x, i); j++)
333 val ^= hash_invariant_expr_1 (insn, XVECEXP (x, i, j));
334 }
335 else if (fmt[i] == 'i' || fmt[i] == 'n')
336 val ^= XINT (x, i);
337 else if (fmt[i] == 'p')
338 val ^= constant_lower_bound (SUBREG_BYTE (x));
339 }
340
341 return val;
342 }
343
344 /* Returns true if the invariant expressions E1 and E2, used in insns INSN1
345 and INSN2, always have the same value. */
346
347 static bool
348 invariant_expr_equal_p (rtx_insn *insn1, rtx e1, rtx_insn *insn2, rtx e2)
349 {
350 enum rtx_code code = GET_CODE (e1);
351 int i, j;
352 const char *fmt;
353 df_ref use1, use2;
354 struct invariant *inv1 = NULL, *inv2 = NULL;
355 rtx sub1, sub2;
356
357 /* If mode of only one of the operands is VOIDmode, it is not equivalent to
358 the other one. If both are VOIDmode, we rely on the caller of this
359 function to verify that their modes are the same. */
360 if (code != GET_CODE (e2) || GET_MODE (e1) != GET_MODE (e2))
361 return false;
362
363 switch (code)
364 {
365 CASE_CONST_ANY:
366 case SYMBOL_REF:
367 case CONST:
368 case LABEL_REF:
369 return rtx_equal_p (e1, e2);
370
371 case REG:
372 use1 = df_find_use (insn1, e1);
373 use2 = df_find_use (insn2, e2);
374 if (use1)
375 inv1 = invariant_for_use (use1);
376 if (use2)
377 inv2 = invariant_for_use (use2);
378
379 if (!inv1 && !inv2)
380 return rtx_equal_p (e1, e2);
381
382 if (!inv1 || !inv2)
383 return false;
384
385 gcc_assert (inv1->eqto != ~0u);
386 gcc_assert (inv2->eqto != ~0u);
387 return inv1->eqto == inv2->eqto;
388
389 default:
390 break;
391 }
392
393 fmt = GET_RTX_FORMAT (code);
394 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
395 {
396 if (fmt[i] == 'e')
397 {
398 sub1 = XEXP (e1, i);
399 sub2 = XEXP (e2, i);
400
401 if (!invariant_expr_equal_p (insn1, sub1, insn2, sub2))
402 return false;
403 }
404
405 else if (fmt[i] == 'E')
406 {
407 if (XVECLEN (e1, i) != XVECLEN (e2, i))
408 return false;
409
410 for (j = 0; j < XVECLEN (e1, i); j++)
411 {
412 sub1 = XVECEXP (e1, i, j);
413 sub2 = XVECEXP (e2, i, j);
414
415 if (!invariant_expr_equal_p (insn1, sub1, insn2, sub2))
416 return false;
417 }
418 }
419 else if (fmt[i] == 'i' || fmt[i] == 'n')
420 {
421 if (XINT (e1, i) != XINT (e2, i))
422 return false;
423 }
424 else if (fmt[i] == 'p')
425 {
426 if (maybe_ne (SUBREG_BYTE (e1), SUBREG_BYTE (e2)))
427 return false;
428 }
429 /* Unhandled type of subexpression, we fail conservatively. */
430 else
431 return false;
432 }
433
434 return true;
435 }
436
437 struct invariant_expr_hasher : free_ptr_hash <invariant_expr_entry>
438 {
439 static inline hashval_t hash (const invariant_expr_entry *);
440 static inline bool equal (const invariant_expr_entry *,
441 const invariant_expr_entry *);
442 };
443
444 /* Returns hash value for invariant expression entry ENTRY. */
445
446 inline hashval_t
447 invariant_expr_hasher::hash (const invariant_expr_entry *entry)
448 {
449 return entry->hash;
450 }
451
452 /* Compares invariant expression entries ENTRY1 and ENTRY2. */
453
454 inline bool
455 invariant_expr_hasher::equal (const invariant_expr_entry *entry1,
456 const invariant_expr_entry *entry2)
457 {
458 if (entry1->mode != entry2->mode)
459 return false;
460
461 return invariant_expr_equal_p (entry1->inv->insn, entry1->expr,
462 entry2->inv->insn, entry2->expr);
463 }
464
465 typedef hash_table<invariant_expr_hasher> invariant_htab_type;
466
467 /* Checks whether an invariant with value EXPR in machine mode MODE is
468 recorded in EQ. If this is the case, return the invariant. Otherwise
469 insert INV into the table for this expression and return INV. */
470
471 static struct invariant *
472 find_or_insert_inv (invariant_htab_type *eq, rtx expr, machine_mode mode,
473 struct invariant *inv)
474 {
475 hashval_t hash = hash_invariant_expr_1 (inv->insn, expr);
476 struct invariant_expr_entry *entry;
477 struct invariant_expr_entry pentry;
478 invariant_expr_entry **slot;
479
480 pentry.expr = expr;
481 pentry.inv = inv;
482 pentry.mode = mode;
483 slot = eq->find_slot_with_hash (&pentry, hash, INSERT);
484 entry = *slot;
485
486 if (entry)
487 return entry->inv;
488
489 entry = XNEW (struct invariant_expr_entry);
490 entry->inv = inv;
491 entry->expr = expr;
492 entry->mode = mode;
493 entry->hash = hash;
494 *slot = entry;
495
496 return inv;
497 }
498
499 /* Finds invariants identical to INV and records the equivalence. EQ is the
500 hash table of the invariants. */
501
502 static void
503 find_identical_invariants (invariant_htab_type *eq, struct invariant *inv)
504 {
505 unsigned depno;
506 bitmap_iterator bi;
507 struct invariant *dep;
508 rtx expr, set;
509 machine_mode mode;
510 struct invariant *tmp;
511
512 if (inv->eqto != ~0u)
513 return;
514
515 EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, depno, bi)
516 {
517 dep = invariants[depno];
518 find_identical_invariants (eq, dep);
519 }
520
521 set = single_set (inv->insn);
522 expr = SET_SRC (set);
523 mode = GET_MODE (expr);
524 if (mode == VOIDmode)
525 mode = GET_MODE (SET_DEST (set));
526
527 tmp = find_or_insert_inv (eq, expr, mode, inv);
528 inv->eqto = tmp->invno;
529
530 if (tmp->invno != inv->invno && inv->always_executed)
531 tmp->eqno++;
532
533 if (dump_file && inv->eqto != inv->invno)
534 fprintf (dump_file,
535 "Invariant %d is equivalent to invariant %d.\n",
536 inv->invno, inv->eqto);
537 }
538
539 /* Find invariants with the same value and record the equivalences. */
540
541 static void
542 merge_identical_invariants (void)
543 {
544 unsigned i;
545 struct invariant *inv;
546 invariant_htab_type eq (invariants.length ());
547
548 FOR_EACH_VEC_ELT (invariants, i, inv)
549 find_identical_invariants (&eq, inv);
550 }
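
/* For example (hypothetical insn numbers): if insns 10 and 15 both
 compute "rX = rA + rB" from the same invariant inputs, the second
 invariant's eqto is set to the first's invno, and later phases only
 consider the first, representative invariant in the cost/benefit
 decision. */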
551
552 /* Determines the basic blocks inside LOOP that are always executed and
553 stores their bitmap to ALWAYS_REACHED. MAY_EXIT is a bitmap of
554 basic blocks that may either exit the loop, or contain a call that
555 does not have to return. BODY is the body of the loop obtained by
556 get_loop_body_in_dom_order. */
557
558 static void
559 compute_always_reached (class loop *loop, basic_block *body,
560 bitmap may_exit, bitmap always_reached)
561 {
562 unsigned i;
563
564 for (i = 0; i < loop->num_nodes; i++)
565 {
566 if (dominated_by_p (CDI_DOMINATORS, loop->latch, body[i]))
567 bitmap_set_bit (always_reached, i);
568
569 if (bitmap_bit_p (may_exit, i))
570 return;
571 }
572 }
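
/* A sketch of the dominance reasoning above, for an illustrative loop
 body in dominance order:

 header -> then -> join -> latch
 \-> else ---^

 The header, join and latch blocks dominate the latch and are marked as
 always reached; "then" and "else" are not. If the header were in
 MAY_EXIT, the scan would stop there and later blocks would stay
 unmarked. */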
573
574 /* Finds exits out of the LOOP with body BODY. Marks the blocks from which
575 we may exit the loop by a cfg edge in both HAS_EXIT and MAY_EXIT. In
576 MAY_EXIT, additionally mark the blocks that may exit due to a call. */
577
578 static void
579 find_exits (class loop *loop, basic_block *body,
580 bitmap may_exit, bitmap has_exit)
581 {
582 unsigned i;
583 edge_iterator ei;
584 edge e;
585 class loop *outermost_exit = loop, *aexit;
586 bool has_call = false;
587 rtx_insn *insn;
588
589 for (i = 0; i < loop->num_nodes; i++)
590 {
591 if (body[i]->loop_father == loop)
592 {
593 FOR_BB_INSNS (body[i], insn)
594 {
595 if (CALL_P (insn)
596 && (RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)
597 || !RTL_CONST_OR_PURE_CALL_P (insn)))
598 {
599 has_call = true;
600 bitmap_set_bit (may_exit, i);
601 break;
602 }
603 }
604
605 FOR_EACH_EDGE (e, ei, body[i]->succs)
606 {
607 if (! flow_bb_inside_loop_p (loop, e->dest))
608 {
609 bitmap_set_bit (may_exit, i);
610 bitmap_set_bit (has_exit, i);
611 outermost_exit = find_common_loop (outermost_exit,
612 e->dest->loop_father);
613 }
614 /* If we enter a subloop that might never terminate, treat
615 it like a possible exit. */
616 if (flow_loop_nested_p (loop, e->dest->loop_father))
617 bitmap_set_bit (may_exit, i);
618 }
619 continue;
620 }
621
622 /* Use the data stored for the subloop to decide whether we may exit
623 through it. It is sufficient to do this for the header of the loop,
624 as other basic blocks inside it must be dominated by it. */
625 if (body[i]->loop_father->header != body[i])
626 continue;
627
628 if (LOOP_DATA (body[i]->loop_father)->has_call)
629 {
630 has_call = true;
631 bitmap_set_bit (may_exit, i);
632 }
633 aexit = LOOP_DATA (body[i]->loop_father)->outermost_exit;
634 if (aexit != loop)
635 {
636 bitmap_set_bit (may_exit, i);
637 bitmap_set_bit (has_exit, i);
638
639 if (flow_loop_nested_p (aexit, outermost_exit))
640 outermost_exit = aexit;
641 }
642 }
643
644 if (loop->aux == NULL)
645 {
646 loop->aux = xcalloc (1, sizeof (class loop_data));
647 bitmap_initialize (&LOOP_DATA (loop)->regs_ref, &reg_obstack);
648 bitmap_initialize (&LOOP_DATA (loop)->regs_live, &reg_obstack);
649 }
650 LOOP_DATA (loop)->outermost_exit = outermost_exit;
651 LOOP_DATA (loop)->has_call = has_call;
652 }
653
654 /* Check whether we may assign a value to X from a register. */
655
656 static bool
657 may_assign_reg_p (rtx x)
658 {
659 return (GET_MODE (x) != VOIDmode
660 && GET_MODE (x) != BLKmode
661 && can_copy_p (GET_MODE (x))
662 /* Do not mess with the frame pointer adjustments that can
663 be generated e.g. by expand_builtin_setjmp_receiver. */
664 && x != frame_pointer_rtx
665 && (!REG_P (x)
666 || !HARD_REGISTER_P (x)
667 || REGNO_REG_CLASS (REGNO (x)) != NO_REGS));
668 }
669
670 /* Finds definitions that may correspond to invariants in
671 LOOP. */
672
673 static void
674 find_defs (class loop *loop)
675 {
676 if (dump_file)
677 {
678 fprintf (dump_file,
679 "*****starting processing of loop %d ******\n",
680 loop->num);
681 }
682
683 df_chain_add_problem (DF_UD_CHAIN);
684 df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
685 df_analyze_loop (loop);
686 check_invariant_table_size ();
687
688 if (dump_file)
689 {
690 df_dump_region (dump_file);
691 fprintf (dump_file,
692 "*****ending processing of loop %d ******\n",
693 loop->num);
694 }
695 }
696
697 /* Creates a new invariant for definition DEF in INSN, depending on invariants
698 in DEPENDS_ON. ALWAYS_EXECUTED is true if the insn is always executed,
699 unless the program ends due to a function call. The newly created invariant
700 is returned. */
701
702 static struct invariant *
703 create_new_invariant (struct def *def, rtx_insn *insn, bitmap depends_on,
704 bool always_executed)
705 {
706 struct invariant *inv = XNEW (struct invariant);
707 rtx set = single_set (insn);
708 bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
709
710 inv->def = def;
711 inv->always_executed = always_executed;
712 inv->depends_on = depends_on;
713
714 /* If the set is simple, usually by moving it we move the whole store out of
715 the loop. Otherwise we save only the cost of the computation. */
716 if (def)
717 {
718 inv->cost = set_rtx_cost (set, speed);
719 /* ??? Try to determine cheapness of address computation. Unfortunately
720 the address cost is only a relative measure, we can't really compare
721 it with any absolute number, but only with other address costs.
722 But here we don't have any other addresses, so compare with a magic
723 number anyway. It has to be large enough to not regress PR33928
724 (by avoiding moving reg+8, reg+16, reg+24 invariants), but small
725 enough to not regress 410.bwaves either (by still moving reg+reg
726 invariants).
727 See http://gcc.gnu.org/ml/gcc-patches/2009-10/msg01210.html . */
728 if (SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set))))
729 inv->cheap_address = address_cost (SET_SRC (set), word_mode,
730 ADDR_SPACE_GENERIC, speed) < 3;
731 else
732 inv->cheap_address = false;
733 }
734 else
735 {
736 inv->cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)),
737 speed);
738 inv->cheap_address = false;
739 }
740
741 inv->move = false;
742 inv->reg = NULL_RTX;
743 inv->orig_regno = -1;
744 inv->stamp = 0;
745 inv->insn = insn;
746
747 inv->invno = invariants.length ();
748 inv->eqto = ~0u;
749
750 /* Itself. */
751 inv->eqno = 1;
752
753 if (def)
754 def->invno = inv->invno;
755 invariants.safe_push (inv);
756
757 if (dump_file)
758 {
759 fprintf (dump_file,
760 "Set in insn %d is invariant (%d), cost %d, depends on ",
761 INSN_UID (insn), inv->invno, inv->cost);
762 dump_bitmap (dump_file, inv->depends_on);
763 }
764
765 return inv;
766 }
767
768 /* Return a canonical version of X for the address, from the point of view
769 that all multiplications are represented as MULT instead of a multiply
770 by a power of 2 being represented as ASHIFT.
771
772 Callers should prepare a copy of X because this function may modify it
773 in place. */
774
775 static void
776 canonicalize_address_mult (rtx x)
777 {
778 subrtx_var_iterator::array_type array;
779 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
780 {
781 rtx sub = *iter;
782 scalar_int_mode sub_mode;
783 if (is_a <scalar_int_mode> (GET_MODE (sub), &sub_mode)
784 && GET_CODE (sub) == ASHIFT
785 && CONST_INT_P (XEXP (sub, 1))
786 && INTVAL (XEXP (sub, 1)) < GET_MODE_BITSIZE (sub_mode)
787 && INTVAL (XEXP (sub, 1)) >= 0)
788 {
789 HOST_WIDE_INT shift = INTVAL (XEXP (sub, 1));
790 PUT_CODE (sub, MULT);
791 XEXP (sub, 1) = gen_int_mode (HOST_WIDE_INT_1 << shift, sub_mode);
792 iter.skip_subrtxes ();
793 }
794 }
795 }
796
797 /* Maximum number of sub expressions in an address. We set it to
798 a small integer since it's unlikely to have a complicated
799 address expression. */
800
801 #define MAX_CANON_ADDR_PARTS (5)
802
803 /* Collect sub expressions in address X with PLUS as the separator.
804 Sub expressions are stored in vector ADDR_PARTS. */
805
806 static void
807 collect_address_parts (rtx x, vec<rtx> *addr_parts)
808 {
809 subrtx_var_iterator::array_type array;
810 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
811 {
812 rtx sub = *iter;
813
814 if (GET_CODE (sub) != PLUS)
815 {
816 addr_parts->safe_push (sub);
817 iter.skip_subrtxes ();
818 }
819 }
820 }
821
822 /* Compare function for sorting sub expressions X and Y based on
823 precedence defined for commutative operations. */
824
825 static int
826 compare_address_parts (const void *x, const void *y)
827 {
828 const rtx *rx = (const rtx *)x;
829 const rtx *ry = (const rtx *)y;
830 int px = commutative_operand_precedence (*rx);
831 int py = commutative_operand_precedence (*ry);
832
833 return (py - px);
834 }
835
836 /* Return a canonical version of address X by the following steps:
837 1) Rewrite ASHIFT into MULT recursively.
838 2) Divide the address into sub expressions with PLUS as the
839 separator.
840 3) Sort sub expressions according to the precedence defined
841 for commutative operations.
842 4) Simplify CONST_INT_P sub expressions.
843 5) Create new canonicalized address and return.
844 Callers should prepare a copy of X because this function may
845 modify it in place. */
846
847 static rtx
848 canonicalize_address (rtx x)
849 {
850 rtx res;
851 unsigned int i, j;
852 machine_mode mode = GET_MODE (x);
853 auto_vec<rtx, MAX_CANON_ADDR_PARTS> addr_parts;
854
855 /* Rewrite ASHIFT into MULT. */
856 canonicalize_address_mult (x);
857 /* Divide address into sub expressions. */
858 collect_address_parts (x, &addr_parts);
859 /* An address is unlikely to be very complicated. */
860 if (addr_parts.length () < 2
861 || addr_parts.length () > MAX_CANON_ADDR_PARTS)
862 return x;
863
864 /* Sort sub expressions according to canonicalization precedence. */
865 addr_parts.qsort (compare_address_parts);
866
867 /* Fold all CONST_INT sub expressions into one if possible. */
868 for (i = 0; i < addr_parts.length (); i++)
869 if (CONST_INT_P (addr_parts[i]))
870 break;
871
872 for (j = i + 1; j < addr_parts.length (); j++)
873 {
874 gcc_assert (CONST_INT_P (addr_parts[j]));
875 addr_parts[i] = simplify_gen_binary (PLUS, mode,
876 addr_parts[i],
877 addr_parts[j]);
878 }
879
880 /* Chain PLUS operators to the left for !CONST_INT_P sub expressions. */
881 res = addr_parts[0];
882 for (j = 1; j < i; j++)
883 res = simplify_gen_binary (PLUS, mode, res, addr_parts[j]);
884
885 /* Pick up the last CONST_INT_P sub expression. */
886 if (i < addr_parts.length ())
887 res = simplify_gen_binary (PLUS, mode, res, addr_parts[i]);
888
889 return res;
890 }
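
/* A worked example of the steps above (hedged: the order shown is the
 usual canonical one, but the exact ordering comes from
 commutative_operand_precedence). Starting from

 (plus (const_int 8) (plus (reg R1) (ashift (reg R2) (const_int 2))))

 step 1 rewrites the ASHIFT as (mult (reg R2) (const_int 4)); step 2
 collects the parts {(const_int 8), (reg R1), (mult ...)}; step 3 sorts
 them so that the MULT comes first and the constant last; step 4 would
 fold several CONST_INTs into one; and step 5 rebuilds

 (plus (plus (mult (reg R2) (const_int 4)) (reg R1)) (const_int 8)). */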
891
892 /* Given invariant DEF and its address USE, check if the corresponding
893 invariant expr can be propagated into the use or not. */
894
895 static bool
896 inv_can_prop_to_addr_use (struct def *def, df_ref use)
897 {
898 struct invariant *inv;
899 rtx *pos = DF_REF_REAL_LOC (use), def_set, use_set;
900 rtx_insn *use_insn = DF_REF_INSN (use);
901 rtx_insn *def_insn;
902 bool ok;
903
904 inv = invariants[def->invno];
905 /* Don't bother if the address expression is expensive. */
906 if (!inv->cheap_address)
907 return false;
908
909 def_insn = inv->insn;
910 def_set = single_set (def_insn);
911 if (!def_set)
912 return false;
913
914 validate_unshare_change (use_insn, pos, SET_SRC (def_set), true);
915 ok = verify_changes (0);
916 /* Try harder, with canonicalization of the address expression. */
917 if (!ok && (use_set = single_set (use_insn)) != NULL_RTX)
918 {
919 rtx src, dest, mem = NULL_RTX;
920
921 src = SET_SRC (use_set);
922 dest = SET_DEST (use_set);
923 if (MEM_P (src))
924 mem = src;
925 else if (MEM_P (dest))
926 mem = dest;
927
928 if (mem != NULL_RTX
929 && !memory_address_addr_space_p (GET_MODE (mem),
930 XEXP (mem, 0),
931 MEM_ADDR_SPACE (mem)))
932 {
933 rtx addr = canonicalize_address (copy_rtx (XEXP (mem, 0)));
934 if (memory_address_addr_space_p (GET_MODE (mem),
935 addr, MEM_ADDR_SPACE (mem)))
936 ok = true;
937 }
938 }
939 cancel_changes (0);
940 return ok;
941 }
942
943 /* Record USE at DEF. */
944
945 static void
946 record_use (struct def *def, df_ref use)
947 {
948 struct use *u = XNEW (struct use);
949
950 u->pos = DF_REF_REAL_LOC (use);
951 u->insn = DF_REF_INSN (use);
952 u->addr_use_p = (DF_REF_TYPE (use) == DF_REF_REG_MEM_LOAD
953 || DF_REF_TYPE (use) == DF_REF_REG_MEM_STORE);
954 u->next = def->uses;
955 def->uses = u;
956 def->n_uses++;
957 if (u->addr_use_p)
958 {
959 /* Initialize propagation information if this is the first addr
960 use of the inv def. */
961 if (def->n_addr_uses == 0)
962 def->can_prop_to_addr_uses = true;
963
964 def->n_addr_uses++;
965 if (def->can_prop_to_addr_uses && !inv_can_prop_to_addr_use (def, use))
966 def->can_prop_to_addr_uses = false;
967 }
968 }
969
970 /* Finds the invariants USE depends on and stores them in the DEPENDS_ON
971 bitmap. Returns true if all dependencies of USE are known to be
972 loop invariants, false otherwise. */
973
974 static bool
975 check_dependency (basic_block bb, df_ref use, bitmap depends_on)
976 {
977 df_ref def;
978 basic_block def_bb;
979 struct df_link *defs;
980 struct def *def_data;
981 struct invariant *inv;
982
983 if (DF_REF_FLAGS (use) & DF_REF_READ_WRITE)
984 return false;
985
986 defs = DF_REF_CHAIN (use);
987 if (!defs)
988 {
989 unsigned int regno = DF_REF_REGNO (use);
990
991 /* If this is the use of an uninitialized argument register that is
992 likely to be spilled, do not move it lest this might extend its
993 lifetime and cause reload to die. This can occur for a call to
994 a function taking complex number arguments; moving the insns
995 preparing the arguments without moving the call itself wouldn't
996 gain much in practice. */
997 if ((DF_REF_FLAGS (use) & DF_HARD_REG_LIVE)
998 && FUNCTION_ARG_REGNO_P (regno)
999 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno)))
1000 return false;
1001
1002 return true;
1003 }
1004
1005 if (defs->next)
1006 return false;
1007
1008 def = defs->ref;
1009 check_invariant_table_size ();
1010 inv = invariant_table[DF_REF_ID (def)];
1011 if (!inv)
1012 return false;
1013
1014 def_data = inv->def;
1015 gcc_assert (def_data != NULL);
1016
1017 def_bb = DF_REF_BB (def);
1018 /* Note that in case bb == def_bb, we know that the definition
1019 dominates insn, because def has invariant_table[DF_REF_ID(def)]
1020 defined and we process the insns in the basic block bb
1021 sequentially. */
1022 if (!dominated_by_p (CDI_DOMINATORS, bb, def_bb))
1023 return false;
1024
1025 bitmap_set_bit (depends_on, def_data->invno);
1026 return true;
1027 }
1028
1029
1030 /* Finds the invariants INSN depends on and stores them in the DEPENDS_ON
1031 bitmap. Returns true if all dependencies of INSN are known to be
1032 loop invariants, false otherwise. */
1033
1034 static bool
1035 check_dependencies (rtx_insn *insn, bitmap depends_on)
1036 {
1037 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
1038 df_ref use;
1039 basic_block bb = BLOCK_FOR_INSN (insn);
1040
1041 FOR_EACH_INSN_INFO_USE (use, insn_info)
1042 if (!check_dependency (bb, use, depends_on))
1043 return false;
1044 FOR_EACH_INSN_INFO_EQ_USE (use, insn_info)
1045 if (!check_dependency (bb, use, depends_on))
1046 return false;
1047
1048 return true;
1049 }
1050
1051 /* Pre-check candidate DEST to skip those that cannot form a valid insn
1052 during move_invariant_reg. SIMPLE is true if DEST is a pseudo register. */
1053 static bool
1054 pre_check_invariant_p (bool simple, rtx dest)
1055 {
1056 if (simple && REG_P (dest) && DF_REG_DEF_COUNT (REGNO (dest)) > 1)
1057 {
1058 df_ref use;
1059 unsigned int i = REGNO (dest);
1060 struct df_insn_info *insn_info;
1061 df_ref def_rec;
1062
1063 for (use = DF_REG_USE_CHAIN (i); use; use = DF_REF_NEXT_REG (use))
1064 {
1065 rtx_insn *ref = DF_REF_INSN (use);
1066 insn_info = DF_INSN_INFO_GET (ref);
1067
1068 FOR_EACH_INSN_INFO_DEF (def_rec, insn_info)
1069 if (DF_REF_REGNO (def_rec) == i)
1070 {
1071 /* Multiple definitions at this stage are most likely due to
1072 instruction constraints, which require both a read and a write
1073 on the same register. Since move_invariant_reg is not
1074 powerful enough to handle such cases, just ignore the INV
1075 and leave the chance to others. */
1076 return false;
1077 }
1078 }
1079 }
1080 return true;
1081 }
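
/* For instance (illustrative), a two-address insn such as
 "r100 = r100 + r200" both uses and defines r100; if r100 has more than
 one definition in the region, moving one of its sets is unlikely to
 yield a valid insn, so such candidates are skipped here. */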
1082
1083 /* Finds invariant in INSN. ALWAYS_REACHED is true if the insn is always
1084 executed. ALWAYS_EXECUTED is true if the insn is always executed,
1085 unless the program ends due to a function call. */
1086
1087 static void
1088 find_invariant_insn (rtx_insn *insn, bool always_reached, bool always_executed)
1089 {
1090 df_ref ref;
1091 struct def *def;
1092 bitmap depends_on;
1093 rtx set, dest;
1094 bool simple = true;
1095 struct invariant *inv;
1096
1097 /* Jumps have control flow side-effects. */
1098 if (JUMP_P (insn))
1099 return;
1100
1101 set = single_set (insn);
1102 if (!set)
1103 return;
1104 dest = SET_DEST (set);
1105
1106 if (!REG_P (dest)
1107 || HARD_REGISTER_P (dest))
1108 simple = false;
1109
1110 if (!may_assign_reg_p (dest)
1111 || !pre_check_invariant_p (simple, dest)
1112 || !check_maybe_invariant (SET_SRC (set)))
1113 return;
1114
1115 /* If the insn can throw an exception, we cannot move it at all without
1116 changing the cfg. */
1117 if (can_throw_internal (insn))
1118 return;
1119
1120 /* We cannot make a trapping insn executed unless it was executed before. */
1121 if (may_trap_or_fault_p (PATTERN (insn)) && !always_reached)
1122 return;
1123
1124 depends_on = BITMAP_ALLOC (NULL);
1125 if (!check_dependencies (insn, depends_on))
1126 {
1127 BITMAP_FREE (depends_on);
1128 return;
1129 }
1130
1131 if (simple)
1132 def = XCNEW (struct def);
1133 else
1134 def = NULL;
1135
1136 inv = create_new_invariant (def, insn, depends_on, always_executed);
1137
1138 if (simple)
1139 {
1140 ref = df_find_def (insn, dest);
1141 check_invariant_table_size ();
1142 invariant_table[DF_REF_ID (ref)] = inv;
1143 }
1144 }
1145
1146 /* Record registers used in INSN that have a unique invariant definition. */
1147
1148 static void
1149 record_uses (rtx_insn *insn)
1150 {
1151 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
1152 df_ref use;
1153 struct invariant *inv;
1154
1155 FOR_EACH_INSN_INFO_USE (use, insn_info)
1156 {
1157 inv = invariant_for_use (use);
1158 if (inv)
1159 record_use (inv->def, use);
1160 }
1161 FOR_EACH_INSN_INFO_EQ_USE (use, insn_info)
1162 {
1163 inv = invariant_for_use (use);
1164 if (inv)
1165 record_use (inv->def, use);
1166 }
1167 }
1168
1169 /* Finds invariants in INSN. ALWAYS_REACHED is true if the insn is always
1170 executed. ALWAYS_EXECUTED is true if the insn is always executed,
1171 unless the program ends due to a function call. */
1172
1173 static void
1174 find_invariants_insn (rtx_insn *insn, bool always_reached, bool always_executed)
1175 {
1176 find_invariant_insn (insn, always_reached, always_executed);
1177 record_uses (insn);
1178 }
1179
1180 /* Finds invariants in basic block BB. ALWAYS_REACHED is true if the
1181 basic block is always executed. ALWAYS_EXECUTED is true if the basic
1182 block is always executed, unless the program ends due to a function
1183 call. */
1184
1185 static void
1186 find_invariants_bb (basic_block bb, bool always_reached, bool always_executed)
1187 {
1188 rtx_insn *insn;
1189
1190 FOR_BB_INSNS (bb, insn)
1191 {
1192 if (!NONDEBUG_INSN_P (insn))
1193 continue;
1194
1195 find_invariants_insn (insn, always_reached, always_executed);
1196
1197 if (always_reached
1198 && CALL_P (insn)
1199 && (RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)
1200 || ! RTL_CONST_OR_PURE_CALL_P (insn)))
1201 always_reached = false;
1202 }
1203 }
1204
1205 /* Finds invariants in LOOP with body BODY. ALWAYS_REACHED is the bitmap of
1206 basic blocks in BODY that are always executed. ALWAYS_EXECUTED is the
1207 bitmap of basic blocks in BODY that are always executed unless the program
1208 ends due to a function call. */
1209
1210 static void
1211 find_invariants_body (class loop *loop, basic_block *body,
1212 bitmap always_reached, bitmap always_executed)
1213 {
1214 unsigned i;
1215
1216 for (i = 0; i < loop->num_nodes; i++)
1217 find_invariants_bb (body[i],
1218 bitmap_bit_p (always_reached, i),
1219 bitmap_bit_p (always_executed, i));
1220 }
1221
1222 /* Finds invariants in LOOP. */
1223
1224 static void
1225 find_invariants (class loop *loop)
1226 {
1227 auto_bitmap may_exit;
1228 auto_bitmap always_reached;
1229 auto_bitmap has_exit;
1230 auto_bitmap always_executed;
1231 basic_block *body = get_loop_body_in_dom_order (loop);
1232
1233 find_exits (loop, body, may_exit, has_exit);
1234 compute_always_reached (loop, body, may_exit, always_reached);
1235 compute_always_reached (loop, body, has_exit, always_executed);
1236
1237 find_defs (loop);
1238 find_invariants_body (loop, body, always_reached, always_executed);
1239 merge_identical_invariants ();
1240
1241 free (body);
1242 }
1243
1244 /* Frees a list of uses USE. */
1245
1246 static void
1247 free_use_list (struct use *use)
1248 {
1249 struct use *next;
1250
1251 for (; use; use = next)
1252 {
1253 next = use->next;
1254 free (use);
1255 }
1256 }
1257
1258 /* Return pressure class and number of hard registers (through *NREGS)
1259 for destination of INSN. */
1260 static enum reg_class
1261 get_pressure_class_and_nregs (rtx_insn *insn, int *nregs)
1262 {
1263 rtx reg;
1264 enum reg_class pressure_class;
1265 rtx set = single_set (insn);
1266
1267 /* Considered invariant insns have only one set. */
1268 gcc_assert (set != NULL_RTX);
1269 reg = SET_DEST (set);
1270 if (GET_CODE (reg) == SUBREG)
1271 reg = SUBREG_REG (reg);
1272 if (MEM_P (reg))
1273 {
1274 *nregs = 0;
1275 pressure_class = NO_REGS;
1276 }
1277 else
1278 {
1279 if (! REG_P (reg))
1280 reg = NULL_RTX;
1281 if (reg == NULL_RTX)
1282 pressure_class = GENERAL_REGS;
1283 else
1284 {
1285 pressure_class = reg_allocno_class (REGNO (reg));
1286 pressure_class = ira_pressure_class_translate[pressure_class];
1287 }
1288 *nregs
1289 = ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
1290 }
1291 return pressure_class;
1292 }
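
/* For example (illustrative; the exact numbers are target dependent,
 via ira_reg_class_max_nregs): on a 32-bit target a DImode destination
 in GENERAL_REGS typically gives *nregs == 2, while a store to memory
 gives NO_REGS and *nregs == 0 since the result occupies no hard
 register. */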
1293
1294 /* Calculates the cost and number of registers needed for moving invariant INV
1295 out of the loop and stores them to *COMP_COST and *REGS_NEEDED. *CL will be
1296 the REG_CLASS of INV. Return
1297 -1: if INV is invalid.
1298 0: if INV and its depends_on have the same reg_class.
1299 1: if INV and its depends_on have different reg_classes. */
1300
1301 static int
1302 get_inv_cost (struct invariant *inv, int *comp_cost, unsigned *regs_needed,
1303 enum reg_class *cl)
1304 {
1305 int i, acomp_cost;
1306 unsigned aregs_needed[N_REG_CLASSES];
1307 unsigned depno;
1308 struct invariant *dep;
1309 bitmap_iterator bi;
1310 int ret = 1;
1311
1312 /* Find the representative of the class of the equivalent invariants. */
1313 inv = invariants[inv->eqto];
1314
1315 *comp_cost = 0;
1316 if (! flag_ira_loop_pressure)
1317 regs_needed[0] = 0;
1318 else
1319 {
1320 for (i = 0; i < ira_pressure_classes_num; i++)
1321 regs_needed[ira_pressure_classes[i]] = 0;
1322 }
1323
1324 if (inv->move
1325 || inv->stamp == actual_stamp)
1326 return -1;
1327 inv->stamp = actual_stamp;
1328
1329 if (! flag_ira_loop_pressure)
1330 regs_needed[0]++;
1331 else
1332 {
1333 int nregs;
1334 enum reg_class pressure_class;
1335
1336 pressure_class = get_pressure_class_and_nregs (inv->insn, &nregs);
1337 regs_needed[pressure_class] += nregs;
1338 *cl = pressure_class;
1339 ret = 0;
1340 }
1341
1342 if (!inv->cheap_address
1343 || inv->def->n_uses == 0
1344 || inv->def->n_addr_uses < inv->def->n_uses
1345 /* Count cost if the inv can't be propagated into address uses. */
1346 || !inv->def->can_prop_to_addr_uses)
1347 (*comp_cost) += inv->cost * inv->eqno;
1348
1349 #ifdef STACK_REGS
1350 {
1351 /* Hoisting constant pool constants into stack regs may cost more than
1352 just a single register. On x87, the balance is affected both by the
1353 small number of FP registers, and by its register stack organization,
1354 which forces us to add compensation code in and around the loop to
1355 shuffle the operands to the top of stack before use, and pop them
1356 from the stack after the loop finishes.
1357
1358 To model this effect, we increase the number of registers needed for
1359 stack registers by two: one register push, and one register pop.
1360 This usually has the effect that FP constant loads from the constant
1361 pool are not moved out of the loop.
1362
1363 Note that this also means that dependent invariants cannot be moved.
1364 However, the primary purpose of this pass is to move loop invariant
1365 address arithmetic out of loops, and address arithmetic that depends
1366 on floating point constants is unlikely to ever occur. */
1367 rtx set = single_set (inv->insn);
1368 if (set
1369 && IS_STACK_MODE (GET_MODE (SET_SRC (set)))
1370 && constant_pool_constant_p (SET_SRC (set)))
1371 {
1372 if (flag_ira_loop_pressure)
1373 regs_needed[ira_stack_reg_pressure_class] += 2;
1374 else
1375 regs_needed[0] += 2;
1376 }
1377 }
1378 #endif
1379
1380 EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, depno, bi)
1381 {
1382 bool check_p;
1383 enum reg_class dep_cl = ALL_REGS;
1384 int dep_ret;
1385
1386 dep = invariants[depno];
1387
1388 /* If DEP is moved out of the loop, it is not a depends_on any more. */
1389 if (dep->move)
1390 continue;
1391
1392 dep_ret = get_inv_cost (dep, &acomp_cost, aregs_needed, &dep_cl);
1393
1394 if (! flag_ira_loop_pressure)
1395 check_p = aregs_needed[0] != 0;
1396 else
1397 {
1398 for (i = 0; i < ira_pressure_classes_num; i++)
1399 if (aregs_needed[ira_pressure_classes[i]] != 0)
1400 break;
1401 check_p = i < ira_pressure_classes_num;
1402
1403 if ((dep_ret == 1) || ((dep_ret == 0) && (*cl != dep_cl)))
1404 {
1405 *cl = ALL_REGS;
1406 ret = 1;
1407 }
1408 }
1409 if (check_p
1410 /* We need to check always_executed, since if the original value of
1411 the invariant may be preserved, we may need to keep it in a
1412 separate register. TODO: check whether the register has a
1413 use outside of the loop. */
1414 && dep->always_executed
1415 && !dep->def->uses->next)
1416 {
1417 /* If this is a single use, after moving the dependency we will not
1418 need a new register. */
1419 if (! flag_ira_loop_pressure)
1420 aregs_needed[0]--;
1421 else
1422 {
1423 int nregs;
1424 enum reg_class pressure_class;
1425
1426 pressure_class = get_pressure_class_and_nregs (inv->insn, &nregs);
1427 aregs_needed[pressure_class] -= nregs;
1428 }
1429 }
1430
1431 if (! flag_ira_loop_pressure)
1432 regs_needed[0] += aregs_needed[0];
1433 else
1434 {
1435 for (i = 0; i < ira_pressure_classes_num; i++)
1436 regs_needed[ira_pressure_classes[i]]
1437 += aregs_needed[ira_pressure_classes[i]];
1438 }
1439 (*comp_cost) += acomp_cost;
1440 }
1441 return ret;
1442 }
1443
1444 /* Calculates gain for eliminating invariant INV. REGS_USED is the number
1445 of registers used in the loop, NEW_REGS is the number of new variables
1446 already added due to the invariant motion. The number of registers needed
1447 for it is stored in *REGS_NEEDED. SPEED and CALL_P are flags passed
1448 through to estimate_reg_pressure_cost. */
1449
1450 static int
1451 gain_for_invariant (struct invariant *inv, unsigned *regs_needed,
1452 unsigned *new_regs, unsigned regs_used,
1453 bool speed, bool call_p)
1454 {
1455 int comp_cost, size_cost;
1456 /* Work around a -Wmaybe-uninitialized false positive during
1457 profiled bootstrap by initializing it. */
1458 enum reg_class cl = NO_REGS;
1459 int ret;
1460
1461 actual_stamp++;
1462
1463 ret = get_inv_cost (inv, &comp_cost, regs_needed, &cl);
1464
1465 if (! flag_ira_loop_pressure)
1466 {
1467 size_cost = (estimate_reg_pressure_cost (new_regs[0] + regs_needed[0],
1468 regs_used, speed, call_p)
1469 - estimate_reg_pressure_cost (new_regs[0],
1470 regs_used, speed, call_p));
1471 }
1472 else if (ret < 0)
1473 return -1;
1474 else if ((ret == 0) && (cl == NO_REGS))
1475 /* Hoist it anyway since it does not impact register pressure. */
1476 return 1;
1477 else
1478 {
1479 int i;
1480 enum reg_class pressure_class;
1481
1482 for (i = 0; i < ira_pressure_classes_num; i++)
1483 {
1484 pressure_class = ira_pressure_classes[i];
1485
1486 if (!reg_classes_intersect_p (pressure_class, cl))
1487 continue;
1488
1489 if ((int) new_regs[pressure_class]
1490 + (int) regs_needed[pressure_class]
1491 + LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
1492 + param_ira_loop_reserved_regs
1493 > ira_class_hard_regs_num[pressure_class])
1494 break;
1495 }
1496 if (i < ira_pressure_classes_num)
1497 /* There would be an excess of register pressure, and we do not
1498 want to perform this loop invariant motion. All loop invariants
1499 with non-positive gains will be rejected in function
1500 find_invariants_to_move. Therefore we return a negative
1501 number here.
1502
1503 One could think that this also rejects expensive loop
1504 invariant motions and that this will hurt code performance.
1505 However, numerous experiments with different heuristics
1506 taking invariant cost into account did not confirm this
1507 assumption. There are possible explanations for this
1508 result:
1509 o probably all expensive invariants were already moved out
1510 of the loop by PRE and the GIMPLE invariant motion pass.
1511 o expensive invariant execution will be hidden by insn
1512 scheduling or OOO processor hardware because usually such
1513 invariants have a lot of freedom to be executed
1514 out-of-order.
1515 Another reason for ignoring invariant cost vs. spill cost
1516 heuristics is the difficulty of evaluating spill cost
1517 accurately at this stage. */
1518 return -1;
1519 else
1520 size_cost = 0;
1521 }
1522
1523 return comp_cost - size_cost;
1524 }
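
/* A worked example with made-up numbers: if the computation cost of an
 invariant summed over its equivalence class is comp_cost == 8 and the
 extra register raises the estimated pressure cost by size_cost == 6,
 the gain is 2 and the invariant remains a candidate; with
 size_cost == 10 the gain would be negative and
 find_invariants_to_move would reject it. */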
1525
1526 /* Finds the invariant with the best gain for moving. Returns the gain and
1527 stores the invariant in *BEST and the number of registers needed for it
1528 in *REGS_NEEDED. REGS_USED is the number of registers used in the loop.
1529 NEW_REGS is the number of new variables already added due to invariant
1530 motion. */
1531
1532 static int
1533 best_gain_for_invariant (struct invariant **best, unsigned *regs_needed,
1534 unsigned *new_regs, unsigned regs_used,
1535 bool speed, bool call_p)
1536 {
1537 struct invariant *inv;
1538 int i, gain = 0, again;
1539 unsigned aregs_needed[N_REG_CLASSES], invno;
1540
1541 FOR_EACH_VEC_ELT (invariants, invno, inv)
1542 {
1543 if (inv->move)
1544 continue;
1545
1546 /* Only consider the "representatives" of equivalent invariants. */
1547 if (inv->eqto != inv->invno)
1548 continue;
1549
1550 again = gain_for_invariant (inv, aregs_needed, new_regs, regs_used,
1551 speed, call_p);
1552 if (again > gain)
1553 {
1554 gain = again;
1555 *best = inv;
1556 if (! flag_ira_loop_pressure)
1557 regs_needed[0] = aregs_needed[0];
1558 else
1559 {
1560 for (i = 0; i < ira_pressure_classes_num; i++)
1561 regs_needed[ira_pressure_classes[i]]
1562 = aregs_needed[ira_pressure_classes[i]];
1563 }
1564 }
1565 }
1566
1567 return gain;
1568 }
1569
1570 /* Marks invariant INVNO and all its dependencies for moving. */
1571
1572 static void
1573 set_move_mark (unsigned invno, int gain)
1574 {
1575 struct invariant *inv = invariants[invno];
1576 bitmap_iterator bi;
1577
1578 /* Find the representative of the class of the equivalent invariants. */
1579 inv = invariants[inv->eqto];
1580
1581 if (inv->move)
1582 return;
1583 inv->move = true;
1584
1585 if (dump_file)
1586 {
1587 if (gain >= 0)
1588 fprintf (dump_file, "Decided to move invariant %d -- gain %d\n",
1589 invno, gain);
1590 else
1591 fprintf (dump_file, "Decided to move dependent invariant %d\n",
1592 invno);
1593 }
1594
1595 EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, invno, bi)
1596 {
1597 set_move_mark (invno, -1);
1598 }
1599 }
1600
1601 /* Determines which invariants to move. */
1602
1603 static void
1604 find_invariants_to_move (bool speed, bool call_p)
1605 {
1606 int gain;
1607 unsigned i, regs_used, regs_needed[N_REG_CLASSES], new_regs[N_REG_CLASSES];
1608 struct invariant *inv = NULL;
1609
1610 if (!invariants.length ())
1611 return;
1612
1613 if (flag_ira_loop_pressure)
1614 /* REGS_USED is actually never used when the flag is on. */
1615 regs_used = 0;
1616 else
1617 /* We do not really do a good job of estimating the number of
1618 registers used; we put some initial bound here to stand for
1619 induction variables etc. that we do not detect. */
1620 {
1621 unsigned int n_regs = DF_REG_SIZE (df);
1622
1623 regs_used = 2;
1624
1625 for (i = 0; i < n_regs; i++)
1626 {
1627 if (!DF_REGNO_FIRST_DEF (i) && DF_REGNO_LAST_USE (i))
1628 {
1629 /* This is a value that is used but not changed inside loop. */
1630 regs_used++;
1631 }
1632 }
1633 }
1634
1635 if (! flag_ira_loop_pressure)
1636 new_regs[0] = regs_needed[0] = 0;
1637 else
1638 {
1639 for (i = 0; (int) i < ira_pressure_classes_num; i++)
1640 new_regs[ira_pressure_classes[i]] = 0;
1641 }
1642 while ((gain = best_gain_for_invariant (&inv, regs_needed,
1643 new_regs, regs_used,
1644 speed, call_p)) > 0)
1645 {
1646 set_move_mark (inv->invno, gain);
1647 if (! flag_ira_loop_pressure)
1648 new_regs[0] += regs_needed[0];
1649 else
1650 {
1651 for (i = 0; (int) i < ira_pressure_classes_num; i++)
1652 new_regs[ira_pressure_classes[i]]
1653 += regs_needed[ira_pressure_classes[i]];
1654 }
1655 }
1656 }
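
/* Note that the selection loop above is greedy: it repeatedly picks the
 invariant with the best remaining gain, commits that invariant's
 register needs to NEW_REGS, and re-evaluates the rest, stopping as
 soon as the best gain is no longer positive. */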
1657
1658 /* Replace the uses, reached by the definition of invariant INV, by REG.
1659
1660 IN_GROUP is nonzero if this is part of a group of changes that must be
1661 performed as a group. In that case, the changes will be stored. The
1662 function `apply_change_group' will validate and apply the changes. */
1663
1664 static int
1665 replace_uses (struct invariant *inv, rtx reg, bool in_group)
1666 {
1667 /* Replace the uses we know to be dominated. It saves work for copy
1668 propagation, and it is also necessary so that dependent invariants
1669 are computed correctly. */
1670 if (inv->def)
1671 {
1672 struct use *use;
1673 for (use = inv->def->uses; use; use = use->next)
1674 validate_change (use->insn, use->pos, reg, true);
1675
1676 /* If we aren't part of a larger group, apply the changes now. */
1677 if (!in_group)
1678 return apply_change_group ();
1679 }
1680
1681 return 1;
1682 }
1683
1684 /* Whether invariant INV setting REG can be moved out of LOOP, at the end of
1685 the block preceding its header. */
1686
1687 static bool
1688 can_move_invariant_reg (class loop *loop, struct invariant *inv, rtx reg)
1689 {
1690 df_ref def, use;
1691 unsigned int dest_regno, defs_in_loop_count = 0;
1692 rtx_insn *insn = inv->insn;
1693 basic_block bb = BLOCK_FOR_INSN (inv->insn);
1694
1695 /* We ignore hard registers and memory accesses for cost and complexity reasons.
1696 Hard registers are few at this stage and expensive to consider, as they
1697 require building a separate data flow. Memory accesses would require using
1698 the df_simulate_* and can_move_insns_across functions and are more complex. */
1699 if (!REG_P (reg) || HARD_REGISTER_P (reg))
1700 return false;
1701
1702 /* Check whether the set is always executed. We could omit this condition if
1703 we know that the register is unused outside of the loop, but it does not
1704 seem worth finding out. */
1705 if (!inv->always_executed)
1706 return false;
1707
1708 /* Check that all uses that would be dominated by def are already dominated
1709 by it. */
1710 dest_regno = REGNO (reg);
1711 for (use = DF_REG_USE_CHAIN (dest_regno); use; use = DF_REF_NEXT_REG (use))
1712 {
1713 rtx_insn *use_insn;
1714 basic_block use_bb;
1715
1716 use_insn = DF_REF_INSN (use);
1717 use_bb = BLOCK_FOR_INSN (use_insn);
1718
1719 /* Ignore the instruction being considered for moving. */
1720 if (use_insn == insn)
1721 continue;
1722
1723 /* Don't consider uses outside loop. */
1724 if (!flow_bb_inside_loop_p (loop, use_bb))
1725 continue;
1726
1727 /* Don't move if a use is not dominated by def in insn. */
1728 if (use_bb == bb && DF_INSN_LUID (insn) >= DF_INSN_LUID (use_insn))
1729 return false;
1730 if (!dominated_by_p (CDI_DOMINATORS, use_bb, bb))
1731 return false;
1732 }
1733
1734 /* Check for other defs. Any other def in the loop might reach a use
1735 currently reached by the def in insn. */
1736 for (def = DF_REG_DEF_CHAIN (dest_regno); def; def = DF_REF_NEXT_REG (def))
1737 {
1738 basic_block def_bb = DF_REF_BB (def);
1739
1740 /* Defs in the exit block cannot reach a use they weren't already reaching. */
1741 if (single_succ_p (def_bb))
1742 {
1743 basic_block def_bb_succ;
1744
1745 def_bb_succ = single_succ (def_bb);
1746 if (!flow_bb_inside_loop_p (loop, def_bb_succ))
1747 continue;
1748 }
1749
1750 if (++defs_in_loop_count > 1)
1751 return false;
1752 }
1753
1754 return true;
1755 }
1756
1757 /* Move invariant INVNO out of the LOOP. Returns true if this succeeds, false
1758 otherwise. */
1759
1760 static bool
1761 move_invariant_reg (class loop *loop, unsigned invno)
1762 {
1763 struct invariant *inv = invariants[invno];
1764 struct invariant *repr = invariants[inv->eqto];
1765 unsigned i;
1766 basic_block preheader = loop_preheader_edge (loop)->src;
1767 rtx reg, set, dest, note;
1768 bitmap_iterator bi;
1769 int regno = -1;
1770
1771 if (inv->reg)
1772 return true;
1773 if (!repr->move)
1774 return false;
1775
1776 /* If this is a representative of the class of equivalent invariants,
1777 really move the invariant. Otherwise just replace its use with
1778 the register used for the representative. */
1779 if (inv == repr)
1780 {
1781 if (inv->depends_on)
1782 {
1783 EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, i, bi)
1784 {
1785 if (!move_invariant_reg (loop, i))
1786 goto fail;
1787 }
1788 }
1789
1790 /* If possible, just move the set out of the loop. Otherwise, we
1791 need to create a temporary register. */
1792 set = single_set (inv->insn);
1793 reg = dest = SET_DEST (set);
1794 if (GET_CODE (reg) == SUBREG)
1795 reg = SUBREG_REG (reg);
1796 if (REG_P (reg))
1797 regno = REGNO (reg);
1798
1799 if (!can_move_invariant_reg (loop, inv, dest))
1800 {
1801 reg = gen_reg_rtx_and_attrs (dest);
1802
1803 /* Try replacing the destination by a new pseudoregister. */
1804 validate_change (inv->insn, &SET_DEST (set), reg, true);
1805
1806 /* As well as all the dominated uses. */
1807 replace_uses (inv, reg, true);
1808
1809 /* And validate all the changes. */
1810 if (!apply_change_group ())
1811 goto fail;
1812
1813 emit_insn_after (gen_move_insn (dest, reg), inv->insn);
1814 }
1815 else if (dump_file)
1816 fprintf (dump_file, "Invariant %d moved without introducing a new "
1817 "temporary register\n", invno);
1818 reorder_insns (inv->insn, inv->insn, BB_END (preheader));
1819 df_recompute_luids (preheader);
1820
1821 /* If there is a REG_EQUAL note on the insn we just moved, and the
1822 insn is in a basic block that is not always executed or the note
1823 contains something for which we don't know the invariant status,
1824 the note may no longer be valid after we move the insn. Note that
1825 uses in REG_EQUAL notes are taken into account in the computation
1826 of invariants, so it is safe to retain the note even if it contains
1827 register references for which we know the invariant status. */
1828 if ((note = find_reg_note (inv->insn, REG_EQUAL, NULL_RTX))
1829 && (!inv->always_executed
1830 || !check_maybe_invariant (XEXP (note, 0))))
1831 remove_note (inv->insn, note);
1832 }
1833 else
1834 {
1835 if (!move_invariant_reg (loop, repr->invno))
1836 goto fail;
1837 reg = repr->reg;
1838 regno = repr->orig_regno;
1839 if (!replace_uses (inv, reg, false))
1840 goto fail;
1841 set = single_set (inv->insn);
1842 emit_insn_after (gen_move_insn (SET_DEST (set), reg), inv->insn);
1843 delete_insn (inv->insn);
1844 }
1845
1846 inv->reg = reg;
1847 inv->orig_regno = regno;
1848
1849 return true;
1850
1851 fail:
1852 /* If we failed, clear move flag, so that we do not try to move inv
1853 again. */
1854 if (dump_file)
1855 fprintf (dump_file, "Failed to move invariant %d\n", invno);
1856 inv->move = false;
1857 inv->reg = NULL_RTX;
1858 inv->orig_regno = -1;
1859
1860 return false;
1861 }
1862
1863 /* Move the selected invariants out of the LOOP, creating new
1864 temporary registers for them where necessary. */
1865
1866 static void
1867 move_invariants (class loop *loop)
1868 {
1869 struct invariant *inv;
1870 unsigned i;
1871
1872 FOR_EACH_VEC_ELT (invariants, i, inv)
1873 move_invariant_reg (loop, i);
1874 if (flag_ira_loop_pressure && resize_reg_info ())
1875 {
1876 FOR_EACH_VEC_ELT (invariants, i, inv)
1877 if (inv->reg != NULL_RTX)
1878 {
1879 if (inv->orig_regno >= 0)
1880 setup_reg_classes (REGNO (inv->reg),
1881 reg_preferred_class (inv->orig_regno),
1882 reg_alternate_class (inv->orig_regno),
1883 reg_allocno_class (inv->orig_regno));
1884 else
1885 setup_reg_classes (REGNO (inv->reg),
1886 GENERAL_REGS, NO_REGS, GENERAL_REGS);
1887 }
1888 }
1889 /* Remove the DF_UD_CHAIN problem added in find_defs before rescanning,
1890 to save a bit of compile time. */
1891 df_remove_problem (df_chain);
1892 df_process_deferred_rescans ();
1893 }

/* Initializes invariant motion data.  */

static void
init_inv_motion_data (void)
{
  actual_stamp = 1;

  invariants.create (100);
}

/* Frees the data allocated by invariant motion.  */

static void
free_inv_motion_data (void)
{
  unsigned i;
  struct def *def;
  struct invariant *inv;

  check_invariant_table_size ();
  for (i = 0; i < DF_DEFS_TABLE_SIZE (); i++)
    {
      inv = invariant_table[i];
      if (inv)
        {
          def = inv->def;
          gcc_assert (def != NULL);

          free_use_list (def->uses);
          free (def);
          invariant_table[i] = NULL;
        }
    }

  FOR_EACH_VEC_ELT (invariants, i, inv)
    {
      BITMAP_FREE (inv->depends_on);
      free (inv);
    }
  invariants.release ();
}

/* Move the invariants out of the LOOP.  */

static void
move_single_loop_invariants (class loop *loop)
{
  init_inv_motion_data ();

  find_invariants (loop);
  find_invariants_to_move (optimize_loop_for_speed_p (loop),
                           LOOP_DATA (loop)->has_call);
  move_invariants (loop);

  free_inv_motion_data ();
}

/* Releases the auxiliary data for LOOP.  */

static void
free_loop_data (class loop *loop)
{
  class loop_data *data = LOOP_DATA (loop);
  if (!data)
    return;

  bitmap_clear (&LOOP_DATA (loop)->regs_ref);
  bitmap_clear (&LOOP_DATA (loop)->regs_live);
  free (data);
  loop->aux = NULL;
}

\f

/* Registers currently living.  */
static bitmap_head curr_regs_live;

/* Current reg pressure for each pressure class.  */
static int curr_reg_pressure[N_REG_CLASSES];

/* Record all regs that are set in any one insn.  Communication between
   mark_reg_{store,clobber} and calculate_loop_reg_pressure.  An asm
   can refer to all hard registers.  */
static rtx regs_set[(FIRST_PSEUDO_REGISTER > MAX_RECOG_OPERANDS
                     ? FIRST_PSEUDO_REGISTER : MAX_RECOG_OPERANDS) * 2];
/* Number of regs stored in the previous array.  */
static int n_regs_set;

/* Return pressure class and number of needed hard registers (through
   *NREGS) of register REGNO.  */
static enum reg_class
get_regno_pressure_class (int regno, int *nregs)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      enum reg_class pressure_class;

      pressure_class = reg_allocno_class (regno);
      pressure_class = ira_pressure_class_translate[pressure_class];
      *nregs
        = ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
      return pressure_class;
    }
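  /* A hard register adds to the pressure only if the allocator can
     actually use it; non-allocatable and eliminable registers (such as
     the frame pointer) count as zero.  */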
  else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
           && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
    {
      *nregs = 1;
      return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
    }
  else
    {
      *nregs = 0;
      return NO_REGS;
    }
}

/* Increase (if INCR_P) or decrease current register pressure for
   register REGNO.  */
static void
change_pressure (int regno, bool incr_p)
{
  int nregs;
  enum reg_class pressure_class;

  pressure_class = get_regno_pressure_class (regno, &nregs);
  if (! incr_p)
    curr_reg_pressure[pressure_class] -= nregs;
  else
    {
      curr_reg_pressure[pressure_class] += nregs;
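      /* Track the maximal pressure seen so far in the innermost loop
         containing the current basic block.  */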
      if (LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
          < curr_reg_pressure[pressure_class])
        LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
          = curr_reg_pressure[pressure_class];
    }
}

/* Mark REGNO birth.  */
static void
mark_regno_live (int regno)
{
  class loop *loop;

  for (loop = curr_loop;
       loop != current_loops->tree_root;
       loop = loop_outer (loop))
    bitmap_set_bit (&LOOP_DATA (loop)->regs_live, regno);
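  /* bitmap_set_bit returns false if REGNO was already live, in which
     case the pressure does not change.  */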
  if (!bitmap_set_bit (&curr_regs_live, regno))
    return;
  change_pressure (regno, true);
}

/* Mark REGNO death.  */
static void
mark_regno_death (int regno)
{
  if (! bitmap_clear_bit (&curr_regs_live, regno))
    return;
  change_pressure (regno, false);
}

/* Mark setting register REG.  */
static void
mark_reg_store (rtx reg, const_rtx setter ATTRIBUTE_UNUSED,
                void *data ATTRIBUTE_UNUSED)
{
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regs_set[n_regs_set++] = reg;

  unsigned int end_regno = END_REGNO (reg);
  for (unsigned int regno = REGNO (reg); regno < end_regno; ++regno)
    mark_regno_live (regno);
}

/* Mark clobbering register REG.  */
static void
mark_reg_clobber (rtx reg, const_rtx setter, void *data)
{
  if (GET_CODE (setter) == CLOBBER)
    mark_reg_store (reg, setter, data);
}

/* Mark register REG death.  */
static void
mark_reg_death (rtx reg)
{
  unsigned int end_regno = END_REGNO (reg);
  for (unsigned int regno = REGNO (reg); regno < end_regno; ++regno)
    mark_regno_death (regno);
}

/* Mark occurrences of registers in X for the current loop.  */
static void
mark_ref_regs (rtx x)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return;

  code = GET_CODE (x);
  if (code == REG)
    {
      class loop *loop;

      for (loop = curr_loop;
           loop != current_loops->tree_root;
           loop = loop_outer (loop))
        bitmap_set_bit (&LOOP_DATA (loop)->regs_ref, REGNO (x));
      return;
    }

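  /* Not a register: recurse into all sub-expressions of X.  */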
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      mark_ref_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      {
        int j;

        for (j = 0; j < XVECLEN (x, i); j++)
          mark_ref_regs (XVECEXP (x, i, j));
      }
}

/* Calculate register pressure in the loops.  */
static void
calculate_loop_reg_pressure (void)
{
  int i;
  unsigned int j;
  bitmap_iterator bi;
  basic_block bb;
  rtx_insn *insn;
  rtx link;
  class loop *loop, *parent;

  for (auto loop : loops_list (cfun, 0))
    if (loop->aux == NULL)
      {
        loop->aux = xcalloc (1, sizeof (class loop_data));
        bitmap_initialize (&LOOP_DATA (loop)->regs_ref, &reg_obstack);
        bitmap_initialize (&LOOP_DATA (loop)->regs_live, &reg_obstack);
      }
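  /* Set up ira_no_alloc_regs and eliminable_regset, which
     get_regno_pressure_class consults for hard registers.  */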
  ira_setup_eliminable_regset ();
  bitmap_initialize (&curr_regs_live, &reg_obstack);
  FOR_EACH_BB_FN (bb, cfun)
    {
      curr_loop = bb->loop_father;
      if (curr_loop == current_loops->tree_root)
        continue;

      for (loop = curr_loop;
           loop != current_loops->tree_root;
           loop = loop_outer (loop))
        bitmap_ior_into (&LOOP_DATA (loop)->regs_live, DF_LR_IN (bb));

      bitmap_copy (&curr_regs_live, DF_LR_IN (bb));
      for (i = 0; i < ira_pressure_classes_num; i++)
        curr_reg_pressure[ira_pressure_classes[i]] = 0;
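      /* The registers live on entry to the block determine the initial
         pressure.  */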
      EXECUTE_IF_SET_IN_BITMAP (&curr_regs_live, 0, j, bi)
        change_pressure (j, true);

      FOR_BB_INSNS (bb, insn)
        {
          if (! NONDEBUG_INSN_P (insn))
            continue;

          mark_ref_regs (PATTERN (insn));
          n_regs_set = 0;
          note_stores (insn, mark_reg_clobber, NULL);

          /* Mark any registers dead after INSN as dead now.  */

          for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
            if (REG_NOTE_KIND (link) == REG_DEAD)
              mark_reg_death (XEXP (link, 0));

          /* Mark any registers set in INSN as live.  Clobbers are
             processed a second time here, together with the real
             sets.  */

          note_stores (insn, mark_reg_store, NULL);

          if (AUTO_INC_DEC)
            for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
              if (REG_NOTE_KIND (link) == REG_INC)
                mark_reg_store (XEXP (link, 0), NULL_RTX, NULL);

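          /* A register that was set but has a REG_UNUSED note dies
             within INSN itself, so take it back out of the live set.  */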
          while (n_regs_set-- > 0)
            {
              rtx note = find_regno_note (insn, REG_UNUSED,
                                          REGNO (regs_set[n_regs_set]));
              if (! note)
                continue;

              mark_reg_death (XEXP (note, 0));
            }
        }
    }
  bitmap_release (&curr_regs_live);
  if (flag_ira_region == IRA_REGION_MIXED
      || flag_ira_region == IRA_REGION_ALL)
    for (auto loop : loops_list (cfun, 0))
      {
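        /* A register that is live through the loop but never referenced
           in it does not need a hard register inside the loop, because
           with regional allocation IRA can spill it around the loop.
           Do not count such registers in the loop's pressure.  */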
        EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop)->regs_live, 0, j, bi)
          if (! bitmap_bit_p (&LOOP_DATA (loop)->regs_ref, j))
            {
              enum reg_class pressure_class;
              int nregs;

              pressure_class = get_regno_pressure_class (j, &nregs);
              LOOP_DATA (loop)->max_reg_pressure[pressure_class] -= nregs;
            }
      }
  if (dump_file == NULL)
    return;
  for (auto loop : loops_list (cfun, 0))
    {
      parent = loop_outer (loop);
      fprintf (dump_file, "\n  Loop %d (parent %d, header bb%d, depth %d)\n",
               loop->num, (parent == NULL ? -1 : parent->num),
               loop->header->index, loop_depth (loop));
      fprintf (dump_file, "\n    ref. regnos:");
      EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop)->regs_ref, 0, j, bi)
        fprintf (dump_file, " %d", j);
      fprintf (dump_file, "\n    live regnos:");
      EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop)->regs_live, 0, j, bi)
        fprintf (dump_file, " %d", j);
      fprintf (dump_file, "\n    Pressure:");
      for (i = 0; i < ira_pressure_classes_num; i++)
        {
          enum reg_class pressure_class;

          pressure_class = ira_pressure_classes[i];
          if (LOOP_DATA (loop)->max_reg_pressure[pressure_class] == 0)
            continue;
          fprintf (dump_file, " %s=%d", reg_class_names[pressure_class],
                   LOOP_DATA (loop)->max_reg_pressure[pressure_class]);
        }
      fprintf (dump_file, "\n");
    }
}

\f

/* Move the invariants out of the loops.  */

void
move_loop_invariants (void)
{
  if (optimize == 1)
    df_live_add_problem ();
  /* ??? This is a hack.  We should only need to call df_live_set_all_dirty
     for optimize == 1, but can_move_invariant_reg relies on DF_INSN_LUID
     being up-to-date.  That isn't always true (even after df_analyze)
     because df_process_deferred_rescans doesn't necessarily cause
     blocks to be rescanned.  */
  df_live_set_all_dirty ();
  if (flag_ira_loop_pressure)
    {
      df_analyze ();
      regstat_init_n_sets_and_refs ();
      ira_set_pseudo_classes (true, dump_file);
      calculate_loop_reg_pressure ();
      regstat_free_n_sets_and_refs ();
    }
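  /* Make uses in REG_EQUAL/REG_EQUIV notes visible to DF and defer insn
     rescans until df_process_deferred_rescans.  */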
  df_set_flags (DF_EQ_NOTES + DF_DEFER_INSN_RESCAN);
  /* Process the loops, innermost first.  */
  for (auto loop : loops_list (cfun, LI_FROM_INNERMOST))
    {
      curr_loop = loop;
      /* move_single_loop_invariants is time-consuming for very large
         loops and might need a lot of memory.  At -O1, only do loop
         invariant motion for very small loops.  */
      unsigned max_bbs = param_loop_invariant_max_bbs_in_loop;
      if (optimize < 2)
        max_bbs /= 10;
      if (loop->num_nodes <= max_bbs)
        move_single_loop_invariants (loop);
    }

  for (auto loop : loops_list (cfun, 0))
    free_loop_data (loop);

  if (flag_ira_loop_pressure)
    /* There is no point in keeping this info, as it will most likely
       be outdated by subsequent passes.  */
    free_reg_info ();
  free (invariant_table);
  invariant_table = NULL;
  invariant_table_size = 0;

  if (optimize == 1)
    df_remove_problem (df_live);

  checking_verify_flow_info ();
}