]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/ira-emit.c
2015-07-07 Andrew MacLeod <amacleod@redhat.com>
[thirdparty/gcc.git] / gcc / ira-emit.c
1 /* Integrated Register Allocator. Changing code and generating moves.
2 Copyright (C) 2006-2015 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* When we have more than one region, we need to change the original RTL
22 code after coloring. Let us consider two allocnos representing the
23 same pseudo-register outside and inside a region respectively.
24 They can get different hard-registers. The reload pass works on
25 pseudo registers basis and there is no way to say the reload that
26 pseudo could be in different registers and it is even more
27 difficult to say in what places of the code the pseudo should have
28 particular hard-registers. So in this case IRA has to create and
29 use a new pseudo-register inside the region and adds code to move
30 allocno values on the region's borders. This is done by the code
31 in this file.
32
33    The code makes a top-down traversal of the regions and generates new
34    pseudos and the move code on the region borders.  In some
35 complicated cases IRA can create a new pseudo used temporarily to
36 move allocno values when a swap of values stored in two
37 hard-registers is needed (e.g. two allocnos representing different
38 pseudos outside region got respectively hard registers 1 and 2 and
39 the corresponding allocnos inside the region got respectively hard
40 registers 2 and 1). At this stage, the new pseudo is marked as
41 spilled.
42
43 IRA still creates the pseudo-register and the moves on the region
44 borders even when the both corresponding allocnos were assigned to
45 the same hard-register. It is done because, if the reload pass for
46 some reason spills a pseudo-register representing the original
47 pseudo outside or inside the region, the effect will be smaller
48 because another pseudo will still be in the hard-register. In most
49    cases, this is better than spilling the original pseudo in its
50 whole live-range. If reload does not change the allocation for the
51 two pseudo-registers, the trivial move will be removed by
52 post-reload optimizations.
53
54 IRA does not generate a new pseudo and moves for the allocno values
55 if the both allocnos representing an original pseudo inside and
56 outside region assigned to the same hard register when the register
57 pressure in the region for the corresponding pressure class is less
58 than number of available hard registers for given pressure class.
59
60 IRA also does some optimizations to remove redundant moves which is
61 transformed into stores by the reload pass on CFG edges
62 representing exits from the region.
63
64 IRA tries to reduce duplication of code generated on CFG edges
65 which are enters and exits to/from regions by moving some code to
66 the edge sources or destinations when it is possible. */
67
68 #include "config.h"
69 #include "system.h"
70 #include "coretypes.h"
71 #include "backend.h"
72 #include "tree.h"
73 #include "rtl.h"
74 #include "df.h"
75 #include "regs.h"
76 #include "tm_p.h"
77 #include "target.h"
78 #include "flags.h"
79 #include "obstack.h"
80 #include "cfgrtl.h"
81 #include "cfgbuild.h"
82 #include "alias.h"
83 #include "insn-config.h"
84 #include "expmed.h"
85 #include "dojump.h"
86 #include "explow.h"
87 #include "calls.h"
88 #include "emit-rtl.h"
89 #include "varasm.h"
90 #include "stmt.h"
91 #include "expr.h"
92 #include "recog.h"
93 #include "params.h"
94 #include "reload.h"
95 #include "cfgloop.h"
96 #include "ira.h"
97 #include "alloc-pool.h"
98 #include "ira-int.h"
99
100
101 /* Data used to emit live range split insns and to flatten the IR.  */
102 ira_emit_data_t ira_allocno_emit_data;
103
104 /* Definitions for vectors of pointers. */
105 typedef void *void_p;
106
107 /* Pointers to data allocated for allocnos being created during
108 emitting. Usually there are quite few such allocnos because they
109 are created only for resolving loop in register shuffling. */
110 static vec<void_p> new_allocno_emit_data_vec;
111
112 /* Allocate and initiate the emit data. */
113 void
114 ira_initiate_emit_data (void)
115 {
116 ira_allocno_t a;
117 ira_allocno_iterator ai;
118
119 ira_allocno_emit_data
120 = (ira_emit_data_t) ira_allocate (ira_allocnos_num
121 * sizeof (struct ira_emit_data));
122 memset (ira_allocno_emit_data, 0,
123 ira_allocnos_num * sizeof (struct ira_emit_data));
124 FOR_EACH_ALLOCNO (a, ai)
125 ALLOCNO_ADD_DATA (a) = ira_allocno_emit_data + ALLOCNO_NUM (a);
126 new_allocno_emit_data_vec.create (50);
127
128 }
129
130 /* Free the emit data. */
131 void
132 ira_finish_emit_data (void)
133 {
134 void_p p;
135 ira_allocno_t a;
136 ira_allocno_iterator ai;
137
138 ira_free (ira_allocno_emit_data);
139 FOR_EACH_ALLOCNO (a, ai)
140 ALLOCNO_ADD_DATA (a) = NULL;
141 for (;new_allocno_emit_data_vec.length () != 0;)
142 {
143 p = new_allocno_emit_data_vec.pop ();
144 ira_free (p);
145 }
146 new_allocno_emit_data_vec.release ();
147 }
148
149 /* Create and return a new allocno with given REGNO and
150 LOOP_TREE_NODE. Allocate emit data for it. */
151 static ira_allocno_t
152 create_new_allocno (int regno, ira_loop_tree_node_t loop_tree_node)
153 {
154 ira_allocno_t a;
155
156 a = ira_create_allocno (regno, false, loop_tree_node);
157 ALLOCNO_ADD_DATA (a) = ira_allocate (sizeof (struct ira_emit_data));
158 memset (ALLOCNO_ADD_DATA (a), 0, sizeof (struct ira_emit_data));
159 new_allocno_emit_data_vec.safe_push (ALLOCNO_ADD_DATA (a));
160 return a;
161 }
162
163 \f
164
/* See comments below.  */
typedef struct move *move_t;

/* The structure represents an allocno move.  Both allocnos have the
   same original regno but different allocation.  */
struct move
{
  /* The allocnos involved in the move.  */
  ira_allocno_t from, to;
  /* The next move in the move sequence.  */
  move_t next;
  /* Used for finding dependencies (set during the depth-first
     traversal in traverse_moves).  */
  bool visited_p;
  /* The size of the following array.  */
  int deps_num;
  /* Moves on which this move depends.  A dependency can be cyclic,
     which means a temporary is needed to generate the moves.
     Sequence A1->A2, B1->B2 where A1 and B2 are assigned to reg R1
     and A2 and B1 are assigned to reg R2 is an example of cyclic
     dependencies.  */
  move_t *deps;
  /* First insn generated for the move.  */
  rtx_insn *insn;
};
189
190 /* Array of moves (indexed by BB index) which should be put at the
191 start/end of the corresponding basic blocks. */
192 static move_t *at_bb_start, *at_bb_end;
193
194 /* Max regno before renaming some pseudo-registers. For example, the
195 same pseudo-register can be renamed in a loop if its allocation is
196 different outside the loop. */
197 static int max_regno_before_changing;
198
199 /* Return new move of allocnos TO and FROM. */
200 static move_t
201 create_move (ira_allocno_t to, ira_allocno_t from)
202 {
203 move_t move;
204
205 move = (move_t) ira_allocate (sizeof (struct move));
206 move->deps = NULL;
207 move->deps_num = 0;
208 move->to = to;
209 move->from = from;
210 move->next = NULL;
211 move->insn = NULL;
212 move->visited_p = false;
213 return move;
214 }
215
216 /* Free memory for MOVE and its dependencies. */
217 static void
218 free_move (move_t move)
219 {
220 if (move->deps != NULL)
221 ira_free (move->deps);
222 ira_free (move);
223 }
224
225 /* Free memory for list of the moves given by its HEAD. */
226 static void
227 free_move_list (move_t head)
228 {
229 move_t next;
230
231 for (; head != NULL; head = next)
232 {
233 next = head->next;
234 free_move (head);
235 }
236 }
237
238 /* Return TRUE if the move list LIST1 and LIST2 are equal (two
239 moves are equal if they involve the same allocnos). */
240 static bool
241 eq_move_lists_p (move_t list1, move_t list2)
242 {
243 for (; list1 != NULL && list2 != NULL;
244 list1 = list1->next, list2 = list2->next)
245 if (list1->from != list2->from || list1->to != list2->to)
246 return false;
247 return list1 == list2;
248 }
249
250 /* Print move list LIST into file F. */
251 static void
252 print_move_list (FILE *f, move_t list)
253 {
254 for (; list != NULL; list = list->next)
255 fprintf (f, " a%dr%d->a%dr%d",
256 ALLOCNO_NUM (list->from), ALLOCNO_REGNO (list->from),
257 ALLOCNO_NUM (list->to), ALLOCNO_REGNO (list->to));
258 fprintf (f, "\n");
259 }
260
extern void ira_debug_move_list (move_t list);

/* Print move list LIST into stderr.  The extern declaration above
   gives the function a prototype and external linkage; presumably so
   it stays callable from a debugger even though no other file
   references it — TODO confirm against GCC debug-helper convention.  */
void
ira_debug_move_list (move_t list)
{
  print_move_list (stderr, list);
}
269
/* This recursive function changes pseudo-registers in *LOC if it is
   necessary.  The function returns TRUE if a change was done.  A
   pseudo is replaced by the reg recorded in the emit data of the
   allocno currently mapped to its regno.  */
static bool
change_regs (rtx *loc)
{
  int i, regno, result = false;
  const char *fmt;
  enum rtx_code code;
  rtx reg;

  if (*loc == NULL_RTX)
    return false;
  code = GET_CODE (*loc);
  if (code == REG)
    {
      regno = REGNO (*loc);
      /* Hard registers are never renamed here.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	return false;
      if (regno >= max_regno_before_changing)
	/* It is a shared register which was changed already.  */
	return false;
      /* No allocno for this regno in the current region: nothing to
	 substitute.  */
      if (ira_curr_regno_allocno_map[regno] == NULL)
	return false;
      reg = allocno_emit_reg (ira_curr_regno_allocno_map[regno]);
      if (reg == *loc)
	return false;
      *loc = reg;
      return true;
    }

  /* Not a REG: recurse into all sub-rtxes ('e') and rtx vectors
     ('E') of this rtx.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	result = change_regs (&XEXP (*loc, i)) || result;
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (*loc, i) - 1; j >= 0; j--)
	    result = change_regs (&XVECEXP (*loc, i, j)) || result;
	}
    }
  return result;
}
315
316 static bool
317 change_regs_in_insn (rtx_insn **insn_ptr)
318 {
319 rtx rtx = *insn_ptr;
320 bool result = change_regs (&rtx);
321 *insn_ptr = as_a <rtx_insn *> (rtx);
322 return result;
323 }
324
325 /* Attach MOVE to the edge E. The move is attached to the head of the
326 list if HEAD_P is TRUE. */
327 static void
328 add_to_edge_list (edge e, move_t move, bool head_p)
329 {
330 move_t last;
331
332 if (head_p || e->aux == NULL)
333 {
334 move->next = (move_t) e->aux;
335 e->aux = move;
336 }
337 else
338 {
339 for (last = (move_t) e->aux; last->next != NULL; last = last->next)
340 ;
341 last->next = move;
342 move->next = NULL;
343 }
344 }
345
346 /* Create and return new pseudo-register with the same attributes as
347 ORIGINAL_REG. */
348 rtx
349 ira_create_new_reg (rtx original_reg)
350 {
351 rtx new_reg;
352
353 new_reg = gen_reg_rtx (GET_MODE (original_reg));
354 ORIGINAL_REGNO (new_reg) = ORIGINAL_REGNO (original_reg);
355 REG_USERVAR_P (new_reg) = REG_USERVAR_P (original_reg);
356 REG_POINTER (new_reg) = REG_POINTER (original_reg);
357 REG_ATTRS (new_reg) = REG_ATTRS (original_reg);
358 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
359 fprintf (ira_dump_file, " Creating newreg=%i from oldreg=%i\n",
360 REGNO (new_reg), REGNO (original_reg));
361 ira_expand_reg_equiv ();
362 return new_reg;
363 }
364
365 /* Return TRUE if loop given by SUBNODE inside the loop given by
366 NODE. */
367 static bool
368 subloop_tree_node_p (ira_loop_tree_node_t subnode, ira_loop_tree_node_t node)
369 {
370 for (; subnode != NULL; subnode = subnode->parent)
371 if (subnode == node)
372 return true;
373 return false;
374 }
375
/* Set up member `reg' to REG for allocnos which have the same regno
   as ALLOCNO and which are inside the loop corresponding to
   ALLOCNO.  */
static void
set_allocno_reg (ira_allocno_t allocno, rtx reg)
{
  int regno;
  ira_allocno_t a;
  ira_loop_tree_node_t node;

  node = ALLOCNO_LOOP_TREE_NODE (allocno);
  /* Give REG to every allocno of the same regno whose loop lies
     inside ALLOCNO's loop (including ALLOCNO itself).  */
  for (a = ira_regno_allocno_map[ALLOCNO_REGNO (allocno)];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    if (subloop_tree_node_p (ALLOCNO_LOOP_TREE_NODE (a), node))
      ALLOCNO_EMIT_DATA (a)->reg = reg;
  /* Caps represent ALLOCNO in enclosing loops, so they get the same
     reg too.  */
  for (a = ALLOCNO_CAP (allocno); a != NULL; a = ALLOCNO_CAP (a))
    ALLOCNO_EMIT_DATA (a)->reg = reg;
  regno = ALLOCNO_REGNO (allocno);
  /* Walk upward through caps and parent-loop allocnos of the same
     regno, setting child_renamed_p on each ancestor.  Stop as soon as
     an ancestor is already marked: its own ancestors were marked on a
     previous call.  */
  for (a = allocno;;)
    {
      if (a == NULL || (a = ALLOCNO_CAP (a)) == NULL)
	{
	  /* No (further) cap chain: continue from the parent loop's
	     allocno for this regno.  */
	  node = node->parent;
	  if (node == NULL)
	    break;
	  a = node->regno_allocno_map[regno];
	}
      if (a == NULL)
	continue;
      if (ALLOCNO_EMIT_DATA (a)->child_renamed_p)
	break;
      ALLOCNO_EMIT_DATA (a)->child_renamed_p = true;
    }
}
410
/* Return true if there is an entry to the given loop not from its
   parent (or grandparent) block.  For example, this is possible for
   two adjacent loops inside another loop.  */
static bool
entered_from_non_parent_p (ira_loop_tree_node_t loop_node)
{
  ira_loop_tree_node_t bb_node, src_loop_node, parent;
  edge e;
  edge_iterator ei;

  /* Examine the predecessors of every basic block directly contained
     in LOOP_NODE.  */
  for (bb_node = loop_node->children;
       bb_node != NULL;
       bb_node = bb_node->next)
    if (bb_node->bb != NULL)
      {
	FOR_EACH_EDGE (e, ei, bb_node->bb->preds)
	  if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
	      && (src_loop_node = IRA_BB_NODE (e->src)->parent) != loop_node)
	    {
	      /* Is the source block inside a loop nested in
		 LOOP_NODE?  */
	      for (parent = src_loop_node->parent;
		   parent != NULL;
		   parent = parent->parent)
		if (parent == loop_node)
		  break;
	      if (parent != NULL)
		/* That is an exit from a nested loop -- skip it.  */
		continue;
	      /* Is the source block in an ancestor of LOOP_NODE?  If
		 so, the edge is a normal loop entry from the parent
		 (or grandparent).  */
	      for (parent = loop_node->parent;
		   parent != NULL;
		   parent = parent->parent)
		if (src_loop_node == parent)
		  break;
	      if (parent == NULL)
		/* Neither nested nor an ancestor: a side entry.  */
		return true;
	    }
      }
  return false;
}
449
450 /* Set up ENTERED_FROM_NON_PARENT_P for each loop region. */
451 static void
452 setup_entered_from_non_parent_p (void)
453 {
454 unsigned int i;
455 loop_p loop;
456
457 ira_assert (current_loops != NULL);
458 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
459 if (ira_loop_nodes[i].regno_allocno_map != NULL)
460 ira_loop_nodes[i].entered_from_non_parent_p
461 = entered_from_non_parent_p (&ira_loop_nodes[i]);
462 }
463
/* Return TRUE if move of SRC_ALLOCNO (assigned to hard register) to
   DEST_ALLOCNO (assigned to memory) can be removed because it does
   not change value of the destination.  One possible reason for this
   is the situation when SRC_ALLOCNO is not modified in the
   corresponding loop.  */
static bool
store_can_be_removed_p (ira_allocno_t src_allocno, ira_allocno_t dest_allocno)
{
  int regno, orig_regno;
  ira_allocno_t a;
  ira_loop_tree_node_t node;

  ira_assert (ALLOCNO_CAP_MEMBER (src_allocno) == NULL
	      && ALLOCNO_CAP_MEMBER (dest_allocno) == NULL);
  orig_regno = ALLOCNO_REGNO (src_allocno);
  regno = REGNO (allocno_emit_reg (dest_allocno));
  /* Walk up from the source loop looking for the region that uses the
     destination's reg; the store is redundant only if the pseudo is
     not modified anywhere along that chain.  */
  for (node = ALLOCNO_LOOP_TREE_NODE (src_allocno);
       node != NULL;
       node = node->parent)
    {
      a = node->regno_allocno_map[orig_regno];
      ira_assert (a != NULL);
      if (REGNO (allocno_emit_reg (a)) == (unsigned) regno)
	/* We achieved the destination and everything is ok.  */
	return true;
      else if (bitmap_bit_p (node->modified_regnos, orig_regno))
	return false;
      else if (node->entered_from_non_parent_p)
	/* If there is a path from a destination loop block to the
	   source loop header containing basic blocks of non-parents
	   (grandparents) of the source loop, we should have checked
	   modifications of the pseudo on this path too to decide
	   about possibility to remove the store.  It could be done by
	   solving a data-flow problem.  Unfortunately such global
	   solution would complicate IR flattening.  Therefore we just
	   prohibit removal of the store in such complicated case.  */
	return false;
    }
  /* It is actually a loop entry -- do not remove the store.  */
  return false;
}
505
/* Generate and attach moves to the edge E.  This looks at the final
   regnos of allocnos living on the edge with the same original regno
   to figure out when moves should be generated.  */
static void
generate_edge_moves (edge e)
{
  ira_loop_tree_node_t src_loop_node, dest_loop_node;
  unsigned int regno;
  bitmap_iterator bi;
  ira_allocno_t src_allocno, dest_allocno, *src_map, *dest_map;
  move_t move;
  bitmap regs_live_in_dest, regs_live_out_src;

  src_loop_node = IRA_BB_NODE (e->src)->parent;
  dest_loop_node = IRA_BB_NODE (e->dest)->parent;
  e->aux = NULL;
  /* Moves are only needed on region borders.  */
  if (src_loop_node == dest_loop_node)
    return;
  src_map = src_loop_node->regno_allocno_map;
  dest_map = dest_loop_node->regno_allocno_map;
  regs_live_in_dest = df_get_live_in (e->dest);
  regs_live_out_src = df_get_live_out (e->src);
  /* Consider each pseudo live across the edge (live out of the source
     AND live into the destination).  */
  EXECUTE_IF_SET_IN_REG_SET (regs_live_in_dest,
			     FIRST_PSEUDO_REGISTER, regno, bi)
    if (bitmap_bit_p (regs_live_out_src, regno))
      {
	src_allocno = src_map[regno];
	dest_allocno = dest_map[regno];
	/* Both sides ended up with the same (possibly renamed) reg:
	   no move needed.  */
	if (REGNO (allocno_emit_reg (src_allocno))
	    == REGNO (allocno_emit_reg (dest_allocno)))
	  continue;
	/* Remove unnecessary stores at the region exit.  We should do
	   this for readonly memory for sure and this is guaranteed by
	   that we never generate moves on region borders (see
	   checking in function change_loop).  */
 	if (ALLOCNO_HARD_REGNO (dest_allocno) < 0
	    && ALLOCNO_HARD_REGNO (src_allocno) >= 0
	    && store_can_be_removed_p (src_allocno, dest_allocno))
	  {
	    ALLOCNO_EMIT_DATA (src_allocno)->mem_optimized_dest = dest_allocno;
	    ALLOCNO_EMIT_DATA (dest_allocno)->mem_optimized_dest_p = true;
	    if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	      fprintf (ira_dump_file, "      Remove r%d:a%d->a%d(mem)\n",
		       regno, ALLOCNO_NUM (src_allocno),
		       ALLOCNO_NUM (dest_allocno));
	    continue;
	  }
	move = create_move (dest_allocno, src_allocno);
	add_to_edge_list (e, move, true);
    }
}
557
558 /* Bitmap of allocnos local for the current loop. */
559 static bitmap local_allocno_bitmap;
560
561 /* This bitmap is used to find that we need to generate and to use a
562 new pseudo-register when processing allocnos with the same original
563 regno. */
564 static bitmap used_regno_bitmap;
565
566 /* This bitmap contains regnos of allocnos which were renamed locally
567 because the allocnos correspond to disjoint live ranges in loops
568 with a common parent. */
569 static bitmap renamed_regno_bitmap;
570
571 /* Change (if necessary) pseudo-registers inside loop given by loop
572 tree node NODE. */
573 static void
574 change_loop (ira_loop_tree_node_t node)
575 {
576 bitmap_iterator bi;
577 unsigned int i;
578 int regno;
579 bool used_p;
580 ira_allocno_t allocno, parent_allocno, *map;
581 rtx_insn *insn;
582 rtx original_reg;
583 enum reg_class aclass, pclass;
584 ira_loop_tree_node_t parent;
585
586 if (node != ira_loop_tree_root)
587 {
588 ira_assert (current_loops != NULL);
589
590 if (node->bb != NULL)
591 {
592 FOR_BB_INSNS (node->bb, insn)
593 if (INSN_P (insn) && change_regs_in_insn (&insn))
594 {
595 df_insn_rescan (insn);
596 df_notes_rescan (insn);
597 }
598 return;
599 }
600
601 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
602 fprintf (ira_dump_file,
603 " Changing RTL for loop %d (header bb%d)\n",
604 node->loop_num, node->loop->header->index);
605
606 parent = ira_curr_loop_tree_node->parent;
607 map = parent->regno_allocno_map;
608 EXECUTE_IF_SET_IN_REG_SET (ira_curr_loop_tree_node->border_allocnos,
609 0, i, bi)
610 {
611 allocno = ira_allocnos[i];
612 regno = ALLOCNO_REGNO (allocno);
613 aclass = ALLOCNO_CLASS (allocno);
614 pclass = ira_pressure_class_translate[aclass];
615 parent_allocno = map[regno];
616 ira_assert (regno < ira_reg_equiv_len);
617 /* We generate the same hard register move because the
618 reload pass can put an allocno into memory in this case
619 we will have live range splitting. If it does not happen
620 such the same hard register moves will be removed. The
621 worst case when the both allocnos are put into memory by
622 the reload is very rare. */
623 if (parent_allocno != NULL
624 && (ALLOCNO_HARD_REGNO (allocno)
625 == ALLOCNO_HARD_REGNO (parent_allocno))
626 && (ALLOCNO_HARD_REGNO (allocno) < 0
627 || (parent->reg_pressure[pclass] + 1
628 <= ira_class_hard_regs_num[pclass])
629 || TEST_HARD_REG_BIT (ira_prohibited_mode_move_regs
630 [ALLOCNO_MODE (allocno)],
631 ALLOCNO_HARD_REGNO (allocno))
632 /* don't create copies because reload can spill an
633 allocno set by copy although the allocno will not
634 get memory slot. */
635 || ira_equiv_no_lvalue_p (regno)
636 || (pic_offset_table_rtx != NULL
637 && (ALLOCNO_REGNO (allocno)
638 == (int) REGNO (pic_offset_table_rtx)))))
639 continue;
640 original_reg = allocno_emit_reg (allocno);
641 if (parent_allocno == NULL
642 || (REGNO (allocno_emit_reg (parent_allocno))
643 == REGNO (original_reg)))
644 {
645 if (internal_flag_ira_verbose > 3 && ira_dump_file)
646 fprintf (ira_dump_file, " %i vs parent %i:",
647 ALLOCNO_HARD_REGNO (allocno),
648 ALLOCNO_HARD_REGNO (parent_allocno));
649 set_allocno_reg (allocno, ira_create_new_reg (original_reg));
650 }
651 }
652 }
653 /* Rename locals: Local allocnos with same regno in different loops
654 might get the different hard register. So we need to change
655 ALLOCNO_REG. */
656 bitmap_and_compl (local_allocno_bitmap,
657 ira_curr_loop_tree_node->all_allocnos,
658 ira_curr_loop_tree_node->border_allocnos);
659 EXECUTE_IF_SET_IN_REG_SET (local_allocno_bitmap, 0, i, bi)
660 {
661 allocno = ira_allocnos[i];
662 regno = ALLOCNO_REGNO (allocno);
663 if (ALLOCNO_CAP_MEMBER (allocno) != NULL)
664 continue;
665 used_p = !bitmap_set_bit (used_regno_bitmap, regno);
666 ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
667 if (! used_p)
668 continue;
669 bitmap_set_bit (renamed_regno_bitmap, regno);
670 set_allocno_reg (allocno, ira_create_new_reg (allocno_emit_reg (allocno)));
671 }
672 }
673
674 /* Process to set up flag somewhere_renamed_p. */
675 static void
676 set_allocno_somewhere_renamed_p (void)
677 {
678 unsigned int regno;
679 ira_allocno_t allocno;
680 ira_allocno_iterator ai;
681
682 FOR_EACH_ALLOCNO (allocno, ai)
683 {
684 regno = ALLOCNO_REGNO (allocno);
685 if (bitmap_bit_p (renamed_regno_bitmap, regno)
686 && REGNO (allocno_emit_reg (allocno)) == regno)
687 ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
688 }
689 }
690
691 /* Return TRUE if move lists on all edges given in vector VEC are
692 equal. */
693 static bool
694 eq_edge_move_lists_p (vec<edge, va_gc> *vec)
695 {
696 move_t list;
697 int i;
698
699 list = (move_t) EDGE_I (vec, 0)->aux;
700 for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
701 if (! eq_move_lists_p (list, (move_t) EDGE_I (vec, i)->aux))
702 return false;
703 return true;
704 }
705
/* Look at all entry edges (if START_P) or exit edges of basic block
   BB and put move lists at the BB start or end if it is possible.  In
   other words, this decreases code duplication of allocno moves.  */
static void
unify_moves (basic_block bb, bool start_p)
{
  int i;
  edge e;
  move_t list;
  vec<edge, va_gc> *vec;

  vec = (start_p ? bb->preds : bb->succs);
  /* Hoisting into the block is possible only when every edge carries
     an identical move list.  */
  if (EDGE_COUNT (vec) == 0 || ! eq_edge_move_lists_p (vec))
    return;
  e = EDGE_I (vec, 0);
  list = (move_t) e->aux;
  /* Moves cannot be inserted after a control flow insn ending BB.  */
  if (! start_p && control_flow_insn_p (BB_END (bb)))
    return;
  e->aux = NULL;
  /* Keep the first edge's list; the duplicates on the remaining edges
     are freed.  */
  for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
    {
      e = EDGE_I (vec, i);
      free_move_list ((move_t) e->aux);
      e->aux = NULL;
    }
  if (start_p)
    at_bb_start[bb->index] = list;
  else
    at_bb_end[bb->index] = list;
}
736
737 /* Last move (in move sequence being processed) setting up the
738 corresponding hard register. */
739 static move_t hard_regno_last_set[FIRST_PSEUDO_REGISTER];
740
741 /* If the element value is equal to CURR_TICK then the corresponding
742 element in `hard_regno_last_set' is defined and correct. */
743 static int hard_regno_last_set_check[FIRST_PSEUDO_REGISTER];
744
745 /* Last move (in move sequence being processed) setting up the
746 corresponding allocno. */
747 static move_t *allocno_last_set;
748
749 /* If the element value is equal to CURR_TICK then the corresponding
750    element in `allocno_last_set' is defined and correct.  */
751 static int *allocno_last_set_check;
752
753 /* Definition of vector of moves. */
754
755 /* This vec contains moves sorted topologically (depth-first) on their
756 dependency graph. */
757 static vec<move_t> move_vec;
758
759 /* The variable value is used to check correctness of values of
760    elements of arrays `hard_regno_last_set_check' and
761    `allocno_last_set_check'.  */
762 static int curr_tick;
763
764 /* This recursive function traverses dependencies of MOVE and produces
765 topological sorting (in depth-first order). */
766 static void
767 traverse_moves (move_t move)
768 {
769 int i;
770
771 if (move->visited_p)
772 return;
773 move->visited_p = true;
774 for (i = move->deps_num - 1; i >= 0; i--)
775 traverse_moves (move->deps[i]);
776 move_vec.safe_push (move);
777 }
778
/* Remove unnecessary moves in the LIST, make topological sorting, and
   remove cycles on hard reg dependencies by introducing new allocnos
   assigned to memory and additional moves.  Return the resulting move
   list.  */
static move_t
modify_move_list (move_t list)
{
  int i, n, nregs, hard_regno;
  ira_allocno_t to, from;
  move_t move, new_move, set_move, first, last;

  if (list == NULL)
    return NULL;
  /* Create move deps.  First record, for each hard register touched,
     the move that sets it (hard_regno_last_set, valid when the
     corresponding _check entry equals curr_tick).  */
  curr_tick++;
  for (move = list; move != NULL; move = move->next)
    {
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
	continue;
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (to)];
      for (i = 0; i < nregs; i++)
	{
	  hard_regno_last_set[hard_regno + i] = move;
	  hard_regno_last_set_check[hard_regno + i] = curr_tick;
	}
    }
  /* A move depends on any move that overwrites a hard register its
     source occupies (unless that move sets the same original regno --
     then it is the same value).  Count first, then fill the array.  */
  for (move = list; move != NULL; move = move->next)
    {
      from = move->from;
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
	{
	  nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (from)];
	  for (n = i = 0; i < nregs; i++)
	    if (hard_regno_last_set_check[hard_regno + i] == curr_tick
		&& (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
		    != ALLOCNO_REGNO (from)))
	      n++;
	  move->deps = (move_t *) ira_allocate (n * sizeof (move_t));
	  for (n = i = 0; i < nregs; i++)
	    if (hard_regno_last_set_check[hard_regno + i] == curr_tick
		&& (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
		    != ALLOCNO_REGNO (from)))
	      move->deps[n++] = hard_regno_last_set[hard_regno + i];
	  move->deps_num = n;
	}
    }
  /* Topological sorting:  */
  move_vec.truncate (0);
  for (move = list; move != NULL; move = move->next)
    traverse_moves (move);
  /* Rebuild the list from move_vec in reverse (dependencies before
     dependents).  */
  last = NULL;
  for (i = (int) move_vec.length () - 1; i >= 0; i--)
    {
      move = move_vec[i];
      move->next = NULL;
      if (last != NULL)
	last->next = move;
      last = move;
    }
  first = move_vec.last ();
  /* Removing cycles: a cycle remains when a move's source hard reg is
     still overwritten by an earlier move in the sorted order.  Break
     it by copying the overwritten value into a brand-new memory
     ("spilled") allocno first.  */
  curr_tick++;
  move_vec.truncate (0);
  for (move = first; move != NULL; move = move->next)
    {
      from = move->from;
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
	{
	  nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (from)];
	  for (i = 0; i < nregs; i++)
	    if (hard_regno_last_set_check[hard_regno + i] == curr_tick
		&& ALLOCNO_HARD_REGNO
		   (hard_regno_last_set[hard_regno + i]->to) >= 0)
	      {
		int n, j;
		ira_allocno_t new_allocno;

		set_move = hard_regno_last_set[hard_regno + i];
		/* It does not matter what loop_tree_node (of TO or
		   FROM) to use for the new allocno because of
		   subsequent IRA internal representation
		   flattening.  */
		new_allocno
		  = create_new_allocno (ALLOCNO_REGNO (set_move->to),
					ALLOCNO_LOOP_TREE_NODE (set_move->to));
		ALLOCNO_MODE (new_allocno) = ALLOCNO_MODE (set_move->to);
		ira_set_allocno_class (new_allocno,
				       ALLOCNO_CLASS (set_move->to));
		ira_create_allocno_objects (new_allocno);
		ALLOCNO_ASSIGNED_P (new_allocno) = true;
		/* Hard regno -1 means the temporary lives in memory.  */
		ALLOCNO_HARD_REGNO (new_allocno) = -1;
		ALLOCNO_EMIT_DATA (new_allocno)->reg
		  = ira_create_new_reg (allocno_emit_reg (set_move->to));

		/* Make it possibly conflicting with all earlier
		   created allocnos.  Cases where temporary allocnos
		   created to remove the cycles are quite rare.  */
		n = ALLOCNO_NUM_OBJECTS (new_allocno);
		gcc_assert (n == ALLOCNO_NUM_OBJECTS (set_move->to));
		for (j = 0; j < n; j++)
		  {
		    ira_object_t new_obj = ALLOCNO_OBJECT (new_allocno, j);

		    OBJECT_MIN (new_obj) = 0;
		    OBJECT_MAX (new_obj) = ira_objects_num - 1;
		  }

		/* Redirect the clobbering move to the temporary and
		   add a move restoring its value.  */
		new_move = create_move (set_move->to, new_allocno);
		set_move->to = new_allocno;
		move_vec.safe_push (new_move);
		ira_move_loops_num++;
		if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		  fprintf (ira_dump_file,
			   "    Creating temporary allocno a%dr%d\n",
			   ALLOCNO_NUM (new_allocno),
			   REGNO (allocno_emit_reg (new_allocno)));
	      }
	}
      if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
	continue;
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (to)];
      for (i = 0; i < nregs; i++)
	{
	  hard_regno_last_set[hard_regno + i] = move;
	  hard_regno_last_set_check[hard_regno + i] = curr_tick;
	}
    }
  /* Append the restoring moves created above to the end of the
     list.  */
  for (i = (int) move_vec.length () - 1; i >= 0; i--)
    {
      move = move_vec[i];
      move->next = NULL;
      last->next = move;
      last = move;
    }
  return first;
}
918
/* Generate RTX move insns from the move list LIST.  This updates
   allocation cost using move execution frequency FREQ.  Return the
   generated insn sequence.  */
static rtx_insn *
emit_move_list (move_t list, int freq)
{
  rtx to, from, dest;
  int to_regno, from_regno, cost, regno;
  rtx_insn *result, *insn;
  rtx set;
  machine_mode mode;
  enum reg_class aclass;

  grow_reg_equivs ();
  start_sequence ();
  for (; list != NULL; list = list->next)
    {
      /* Generate each move in its own nested sequence so the first
	 insn of the move can be remembered in list->insn.  */
      start_sequence ();
      to = allocno_emit_reg (list->to);
      to_regno = REGNO (to);
      from = allocno_emit_reg (list->from);
      from_regno = REGNO (from);
      emit_move_insn (to, from);
      list->insn = get_insns ();
      end_sequence ();
      for (insn = list->insn; insn != NULL_RTX; insn = NEXT_INSN (insn))
	{
	  /* The reload needs to have set up insn codes.  If the
	     reload sets up insn codes by itself, it may fail because
	     insns will have hard registers instead of pseudos and
	     there may be no machine insn with given hard
	     registers.  */
	  recog_memoized (insn);
	  /* Add insn to equiv init insn list if it is necessary.
	     Otherwise reload will not remove this insn if it decides
	     to use the equivalence.  */
	  if ((set = single_set (insn)) != NULL_RTX)
	    {
	      dest = SET_DEST (set);
	      if (GET_CODE (dest) == SUBREG)
		dest = SUBREG_REG (dest);
	      ira_assert (REG_P (dest));
	      regno = REGNO (dest);
	      if (regno >= ira_reg_equiv_len
		  || (ira_reg_equiv[regno].invariant == NULL_RTX
		      && ira_reg_equiv[regno].constant == NULL_RTX))
		continue; /* regno has no equivalence.  */
	      ira_assert ((int) reg_equivs->length () > regno);
	      reg_equiv_init (regno)
		= gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init (regno));
	    }
	}
      if (ira_use_lra_p)
	ira_update_equiv_info_by_shuffle_insn (to_regno, from_regno, list->insn);
      emit_insn (list->insn);
      /* Account the cost of the move: store (reg->mem), load
	 (mem->reg), or register shuffle (reg->reg).  */
      mode = ALLOCNO_MODE (list->to);
      aclass = ALLOCNO_CLASS (list->to);
      cost = 0;
      if (ALLOCNO_HARD_REGNO (list->to) < 0)
	{
	  if (ALLOCNO_HARD_REGNO (list->from) >= 0)
	    {
	      cost = ira_memory_move_cost[mode][aclass][0] * freq;
	      ira_store_cost += cost;
	    }
	}
      else if (ALLOCNO_HARD_REGNO (list->from) < 0)
	{
	  if (ALLOCNO_HARD_REGNO (list->to) >= 0)
	    {
	      cost = ira_memory_move_cost[mode][aclass][0] * freq;
	      ira_load_cost += cost;
	    }
	}
      else
	{
	  ira_init_register_move_cost_if_necessary (mode);
	  cost = ira_register_move_cost[mode][aclass][aclass] * freq;
	  ira_shuffle_cost += cost;
	}
      ira_overall_cost += cost;
    }
  result = get_insns ();
  end_sequence ();
  return result;
}
1004
1005 /* Generate RTX move insns from move lists attached to basic blocks
1006 and edges. */
1007 static void
1008 emit_moves (void)
1009 {
1010 basic_block bb;
1011 edge_iterator ei;
1012 edge e;
1013 rtx_insn *insns, *tmp;
1014
1015 FOR_EACH_BB_FN (bb, cfun)
1016 {
1017 if (at_bb_start[bb->index] != NULL)
1018 {
1019 at_bb_start[bb->index] = modify_move_list (at_bb_start[bb->index]);
1020 insns = emit_move_list (at_bb_start[bb->index],
1021 REG_FREQ_FROM_BB (bb));
1022 tmp = BB_HEAD (bb);
1023 if (LABEL_P (tmp))
1024 tmp = NEXT_INSN (tmp);
1025 if (NOTE_INSN_BASIC_BLOCK_P (tmp))
1026 tmp = NEXT_INSN (tmp);
1027 if (tmp == BB_HEAD (bb))
1028 emit_insn_before (insns, tmp);
1029 else if (tmp != NULL_RTX)
1030 emit_insn_after (insns, PREV_INSN (tmp));
1031 else
1032 emit_insn_after (insns, get_last_insn ());
1033 }
1034
1035 if (at_bb_end[bb->index] != NULL)
1036 {
1037 at_bb_end[bb->index] = modify_move_list (at_bb_end[bb->index]);
1038 insns = emit_move_list (at_bb_end[bb->index], REG_FREQ_FROM_BB (bb));
1039 ira_assert (! control_flow_insn_p (BB_END (bb)));
1040 emit_insn_after (insns, BB_END (bb));
1041 }
1042
1043 FOR_EACH_EDGE (e, ei, bb->succs)
1044 {
1045 if (e->aux == NULL)
1046 continue;
1047 ira_assert ((e->flags & EDGE_ABNORMAL) == 0
1048 || ! EDGE_CRITICAL_P (e));
1049 e->aux = modify_move_list ((move_t) e->aux);
1050 insert_insn_on_edge
1051 (emit_move_list ((move_t) e->aux,
1052 REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e))),
1053 e);
1054 if (e->src->next_bb != e->dest)
1055 ira_additional_jumps_num++;
1056 }
1057 }
1058 }
1059
1060 /* Update costs of A and corresponding allocnos on upper levels on the
1061 loop tree from reading (if READ_P) or writing A on an execution
1062 path with FREQ. */
1063 static void
1064 update_costs (ira_allocno_t a, bool read_p, int freq)
1065 {
1066 ira_loop_tree_node_t parent;
1067
1068 for (;;)
1069 {
1070 ALLOCNO_NREFS (a)++;
1071 ALLOCNO_FREQ (a) += freq;
1072 ALLOCNO_MEMORY_COST (a)
1073 += (ira_memory_move_cost[ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)]
1074 [read_p ? 1 : 0] * freq);
1075 if (ALLOCNO_CAP (a) != NULL)
1076 a = ALLOCNO_CAP (a);
1077 else if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL
1078 || (a = parent->regno_allocno_map[ALLOCNO_REGNO (a)]) == NULL)
1079 break;
1080 }
1081 }
1082
/* Process moves from LIST with execution FREQ to add ranges, copies,
   and modify costs for allocnos involved in the moves.  All regnos
   living through the list is in LIVE_THROUGH, and the loop tree node
   used to find corresponding allocnos is NODE.  */
static void
add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node,
				     bitmap live_through, int freq)
{
  int start, n;
  unsigned int regno;
  move_t move;
  ira_allocno_t a;
  ira_copy_t cp;
  live_range_t r;
  bitmap_iterator bi;
  HARD_REG_SET hard_regs_live;

  if (list == NULL)
    return;
  /* N counts the pseudos living through the list; it sizes the
     conflict arrays allocated for move destinations below.  */
  n = 0;
  EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
    n++;
  REG_SET_TO_HARD_REG_SET (hard_regs_live, live_through);
  /* This is a trick to guarantee that new ranges is not merged with
     the old ones.  */
  ira_max_point++;
  start = ira_max_point;
  for (move = list; move != NULL; move = move->next)
    {
      ira_allocno_t from = move->from;
      ira_allocno_t to = move->to;
      int nr, i;

      /* The move source and destination are referenced by the move
	 itself, so they are not merely "live through" the list.  */
      bitmap_clear_bit (live_through, ALLOCNO_REGNO (from));
      bitmap_clear_bit (live_through, ALLOCNO_REGNO (to));

      /* Make sure the destination objects have conflict arrays
	 before hard register conflicts are OR-ed into them.  */
      nr = ALLOCNO_NUM_OBJECTS (to);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
	  if (OBJECT_CONFLICT_ARRAY (to_obj) == NULL)
	    {
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file, " Allocate conflicts for a%dr%d\n",
			 ALLOCNO_NUM (to), REGNO (allocno_emit_reg (to)));
	      ira_allocate_object_conflicts (to_obj, n);
	    }
	}
      ior_hard_reg_conflicts (from, &hard_regs_live);
      ior_hard_reg_conflicts (to, &hard_regs_live);

      /* The move reads FROM and writes TO: update reference counts
	 and costs, and tie the allocnos with a copy so coloring
	 prefers giving them the same hard register.  */
      update_costs (from, true, freq);
      update_costs (to, false, freq);
      cp = ira_add_allocno_copy (from, to, freq, false, move->insn, NULL);
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	fprintf (ira_dump_file, " Adding cp%d:a%dr%d-a%dr%d\n",
		 cp->num, ALLOCNO_NUM (cp->first),
		 REGNO (allocno_emit_reg (cp->first)),
		 ALLOCNO_NUM (cp->second),
		 REGNO (allocno_emit_reg (cp->second)));

      /* Extend or create live ranges for the source: a range with
	 finish >= 0 is already closed, so a fresh range is added;
	 an open range (finish < 0) is closed at this point.  */
      nr = ALLOCNO_NUM_OBJECTS (from);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
	  r = OBJECT_LIVE_RANGES (from_obj);
	  if (r == NULL || r->finish >= 0)
	    {
	      ira_add_live_range_to_object (from_obj, start, ira_max_point);
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file,
			 " Adding range [%d..%d] to allocno a%dr%d\n",
			 start, ira_max_point, ALLOCNO_NUM (from),
			 REGNO (allocno_emit_reg (from)));
	    }
	  else
	    {
	      r->finish = ira_max_point;
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file,
			 " Adding range [%d..%d] to allocno a%dr%d\n",
			 r->start, ira_max_point, ALLOCNO_NUM (from),
			 REGNO (allocno_emit_reg (from)));
	    }
	}
      ira_max_point++;
      /* Open a range for the destination (finish == -1 marks it
	 open); it is closed in the second pass below.  */
      nr = ALLOCNO_NUM_OBJECTS (to);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
	  ira_add_live_range_to_object (to_obj, ira_max_point, -1);
	}
      ira_max_point++;
    }
  /* Second pass: close every destination range left open above, so
     all move destinations live to the end of the move list.  */
  for (move = list; move != NULL; move = move->next)
    {
      int nr, i;
      nr = ALLOCNO_NUM_OBJECTS (move->to);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t to_obj = ALLOCNO_OBJECT (move->to, i);
	  r = OBJECT_LIVE_RANGES (to_obj);
	  if (r->finish < 0)
	    {
	      r->finish = ira_max_point - 1;
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file,
			 " Adding range [%d..%d] to allocno a%dr%d\n",
			 r->start, r->finish, ALLOCNO_NUM (move->to),
			 REGNO (allocno_emit_reg (move->to)));
	    }
	}
    }
  /* Pseudos live through the whole list get one range spanning it.
     If the allocno's value was optimized into a destination allocno
     (mem_optimized_dest), the range goes to that allocno instead.  */
  EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
    {
      ira_allocno_t to;
      int nr, i;

      a = node->regno_allocno_map[regno];
      if ((to = ALLOCNO_EMIT_DATA (a)->mem_optimized_dest) != NULL)
	a = to;
      nr = ALLOCNO_NUM_OBJECTS (a);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  ira_add_live_range_to_object (obj, start, ira_max_point - 1);
	}
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	fprintf
	  (ira_dump_file,
	   " Adding range [%d..%d] to live through %s allocno a%dr%d\n",
	   start, ira_max_point - 1,
	   to != NULL ? "upper level" : "",
	   ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
    }
}
1219
1220 /* Process all move list to add ranges, conflicts, copies, and modify
1221 costs for allocnos involved in the moves. */
1222 static void
1223 add_ranges_and_copies (void)
1224 {
1225 basic_block bb;
1226 edge_iterator ei;
1227 edge e;
1228 ira_loop_tree_node_t node;
1229 bitmap live_through;
1230
1231 live_through = ira_allocate_bitmap ();
1232 FOR_EACH_BB_FN (bb, cfun)
1233 {
1234 /* It does not matter what loop_tree_node (of source or
1235 destination block) to use for searching allocnos by their
1236 regnos because of subsequent IR flattening. */
1237 node = IRA_BB_NODE (bb)->parent;
1238 bitmap_copy (live_through, df_get_live_in (bb));
1239 add_range_and_copies_from_move_list
1240 (at_bb_start[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
1241 bitmap_copy (live_through, df_get_live_out (bb));
1242 add_range_and_copies_from_move_list
1243 (at_bb_end[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
1244 FOR_EACH_EDGE (e, ei, bb->succs)
1245 {
1246 bitmap_and (live_through,
1247 df_get_live_in (e->dest), df_get_live_out (bb));
1248 add_range_and_copies_from_move_list
1249 ((move_t) e->aux, node, live_through,
1250 REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e)));
1251 }
1252 }
1253 ira_free_bitmap (live_through);
1254 }
1255
/* The entry function changes code and generates shuffling allocnos on
   region borders for the regional (LOOPS_P is TRUE in this case)
   register allocation.  */
void
ira_emit (bool loops_p)
{
  basic_block bb;
  rtx_insn *insn;
  edge_iterator ei;
  edge e;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  size_t sz;

  /* Start each allocno from the original pseudo; region-local
     allocnos may get new pseudos during the loop tree traversal
     below.  */
  FOR_EACH_ALLOCNO (a, ai)
    ALLOCNO_EMIT_DATA (a)->reg = regno_reg_rtx[ALLOCNO_REGNO (a)];
  /* With a single region there are no borders, so no moves.  */
  if (! loops_p)
    return;
  /* Per-basic-block move lists, indexed by block number.  */
  sz = sizeof (move_t) * last_basic_block_for_fn (cfun);
  at_bb_start = (move_t *) ira_allocate (sz);
  memset (at_bb_start, 0, sz);
  at_bb_end = (move_t *) ira_allocate (sz);
  memset (at_bb_end, 0, sz);
  local_allocno_bitmap = ira_allocate_bitmap ();
  used_regno_bitmap = ira_allocate_bitmap ();
  renamed_regno_bitmap = ira_allocate_bitmap ();
  max_regno_before_changing = max_reg_num ();
  /* Top-down traversal that rewrites allocno regs inside regions
     (presumably creating the new region-local pseudos described in
     the file header — see change_loop).  */
  ira_traverse_loop_tree (true, ira_loop_tree_root, change_loop, NULL);
  set_allocno_somewhere_renamed_p ();
  ira_free_bitmap (used_regno_bitmap);
  ira_free_bitmap (renamed_regno_bitmap);
  ira_free_bitmap (local_allocno_bitmap);
  setup_entered_from_non_parent_p ();
  /* Collect moves needed on each edge that crosses a region border;
     edges to the exit block carry no moves.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      at_bb_start[bb->index] = NULL;
      at_bb_end[bb->index] = NULL;
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
	  generate_edge_moves (e);
    }
  allocno_last_set
    = (move_t *) ira_allocate (sizeof (move_t) * max_reg_num ());
  allocno_last_set_check
    = (int *) ira_allocate (sizeof (int) * max_reg_num ());
  memset (allocno_last_set_check, 0, sizeof (int) * max_reg_num ());
  memset (hard_regno_last_set_check, 0, sizeof (hard_regno_last_set_check));
  curr_tick = 0;
  /* Two unification passes over all blocks; the bool flag appears to
     select block start vs. block end — confirm in unify_moves.  */
  FOR_EACH_BB_FN (bb, cfun)
    unify_moves (bb, true);
  FOR_EACH_BB_FN (bb, cfun)
    unify_moves (bb, false);
  move_vec.create (ira_allocnos_num);
  /* Emit the move insns, then record ranges/conflicts/copies for the
     allocnos involved.  */
  emit_moves ();
  add_ranges_and_copies ();
  /* Clean up: */
  FOR_EACH_BB_FN (bb, cfun)
    {
      free_move_list (at_bb_start[bb->index]);
      free_move_list (at_bb_end[bb->index]);
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  free_move_list ((move_t) e->aux);
	  e->aux = NULL;
	}
    }
  move_vec.release ();
  ira_free (allocno_last_set_check);
  ira_free (allocno_last_set);
  /* Materialize insns queued on edges (may split critical edges).  */
  commit_edge_insertions ();
  /* Fix insn codes.  It is necessary to do it before reload because
     reload assumes initial insn codes defined.  The insn codes can be
     invalidated by CFG infrastructure for example in jump
     redirection.  */
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS_REVERSE (bb, insn)
      if (INSN_P (insn))
	recog_memoized (insn);
  ira_free (at_bb_end);
  ira_free (at_bb_start);
}