]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/ira-emit.c
gcc/ada/
[thirdparty/gcc.git] / gcc / ira-emit.c
CommitLineData
47dd2e78 1/* Integrated Register Allocator. Changing code and generating moves.
3aea1f79 2 Copyright (C) 2006-2014 Free Software Foundation, Inc.
47dd2e78 3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
9Software Foundation; either version 3, or (at your option) any later
10version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15for more details.
16
17You should have received a copy of the GNU General Public License
18along with GCC; see the file COPYING3. If not see
19<http://www.gnu.org/licenses/>. */
20
/* When we have more than one region, we need to change the original
   RTL code after coloring.  Let us consider two allocnos representing
   the same pseudo-register outside and inside a region respectively.
   They can get different hard-registers.  The reload pass works on a
   pseudo-register basis and there is no way to tell reload that a
   pseudo could be in different registers and it is even more
   difficult to say in which places of the code the pseudo should have
   particular hard-registers.  So in this case IRA has to create and
   use a new pseudo-register inside the region and add code to move
   allocno values on the region's borders.  This is done by the code
   in this file.
32
33 The code makes top-down traversal of the regions and generate new
34 pseudos and the move code on the region borders. In some
35 complicated cases IRA can create a new pseudo used temporarily to
36 move allocno values when a swap of values stored in two
37 hard-registers is needed (e.g. two allocnos representing different
38 pseudos outside region got respectively hard registers 1 and 2 and
39 the corresponding allocnos inside the region got respectively hard
40 registers 2 and 1). At this stage, the new pseudo is marked as
41 spilled.
42
43 IRA still creates the pseudo-register and the moves on the region
44 borders even when the both corresponding allocnos were assigned to
45 the same hard-register. It is done because, if the reload pass for
46 some reason spills a pseudo-register representing the original
47 pseudo outside or inside the region, the effect will be smaller
48 because another pseudo will still be in the hard-register. In most
   cases, this is better than spilling the original pseudo in its
50 whole live-range. If reload does not change the allocation for the
51 two pseudo-registers, the trivial move will be removed by
52 post-reload optimizations.
53
54 IRA does not generate a new pseudo and moves for the allocno values
55 if the both allocnos representing an original pseudo inside and
56 outside region assigned to the same hard register when the register
57 pressure in the region for the corresponding pressure class is less
58 than number of available hard registers for given pressure class.
59
60 IRA also does some optimizations to remove redundant moves which is
61 transformed into stores by the reload pass on CFG edges
62 representing exits from the region.
63
64 IRA tries to reduce duplication of code generated on CFG edges
65 which are enters and exits to/from regions by moving some code to
66 the edge sources or destinations when it is possible. */
47dd2e78 67
68#include "config.h"
69#include "system.h"
70#include "coretypes.h"
71#include "tm.h"
72#include "regs.h"
73#include "rtl.h"
74#include "tm_p.h"
75#include "target.h"
76#include "flags.h"
77#include "obstack.h"
78#include "bitmap.h"
79#include "hard-reg-set.h"
94ea8568 80#include "predict.h"
81#include "vec.h"
82#include "hashtab.h"
83#include "hash-set.h"
84#include "machmode.h"
85#include "input.h"
86#include "function.h"
87#include "dominance.h"
88#include "cfg.h"
89#include "cfgrtl.h"
90#include "cfgbuild.h"
47dd2e78 91#include "basic-block.h"
92#include "expr.h"
93#include "recog.h"
94#include "params.h"
47dd2e78 95#include "reload.h"
47dd2e78 96#include "df.h"
97#include "ira-int.h"
98
99
/* Per-allocno data used to emit live range split insns and to
   flatten the IR afterwards.  Indexed by allocno number.  */
ira_emit_data_t ira_allocno_emit_data;

/* Definitions for vectors of pointers.  */
typedef void *void_p;

/* Pointers to data allocated for allocnos being created during
   emitting.  Usually there are quite few such allocnos because they
   are created only for resolving loops in register shuffling.  */
static vec<void_p> new_allocno_emit_data_vec;
66d9a7b9 110
111/* Allocate and initiate the emit data. */
112void
113ira_initiate_emit_data (void)
114{
115 ira_allocno_t a;
116 ira_allocno_iterator ai;
117
118 ira_allocno_emit_data
119 = (ira_emit_data_t) ira_allocate (ira_allocnos_num
120 * sizeof (struct ira_emit_data));
121 memset (ira_allocno_emit_data, 0,
122 ira_allocnos_num * sizeof (struct ira_emit_data));
123 FOR_EACH_ALLOCNO (a, ai)
124 ALLOCNO_ADD_DATA (a) = ira_allocno_emit_data + ALLOCNO_NUM (a);
f1f41a6c 125 new_allocno_emit_data_vec.create (50);
66d9a7b9 126
127}
128
129/* Free the emit data. */
130void
131ira_finish_emit_data (void)
132{
133 void_p p;
134 ira_allocno_t a;
135 ira_allocno_iterator ai;
136
137 ira_free (ira_allocno_emit_data);
138 FOR_EACH_ALLOCNO (a, ai)
139 ALLOCNO_ADD_DATA (a) = NULL;
f1f41a6c 140 for (;new_allocno_emit_data_vec.length () != 0;)
66d9a7b9 141 {
f1f41a6c 142 p = new_allocno_emit_data_vec.pop ();
66d9a7b9 143 ira_free (p);
144 }
f1f41a6c 145 new_allocno_emit_data_vec.release ();
66d9a7b9 146}
147
148/* Create and return a new allocno with given REGNO and
149 LOOP_TREE_NODE. Allocate emit data for it. */
150static ira_allocno_t
151create_new_allocno (int regno, ira_loop_tree_node_t loop_tree_node)
152{
153 ira_allocno_t a;
154
155 a = ira_create_allocno (regno, false, loop_tree_node);
156 ALLOCNO_ADD_DATA (a) = ira_allocate (sizeof (struct ira_emit_data));
157 memset (ALLOCNO_ADD_DATA (a), 0, sizeof (struct ira_emit_data));
f1f41a6c 158 new_allocno_emit_data_vec.safe_push (ALLOCNO_ADD_DATA (a));
66d9a7b9 159 return a;
160}
161
162\f
163
/* Opaque handle to an allocno move; see struct move below.  */
typedef struct move *move_t;

/* The structure represents an allocno move.  Both allocnos have the
   same original regno but different allocation.  */
struct move
{
  /* The allocnos involved in the move.  */
  ira_allocno_t from, to;
  /* The next move in the move sequence.  */
  move_t next;
  /* Used for finding dependencies.  */
  bool visited_p;
  /* The size of the following array.  */
  int deps_num;
  /* Moves on which the given move depends.  Dependencies can be
     cyclic; that means we need a temporary to generate the moves.
     Sequence A1->A2, B1->B2 where A1 and B2 are assigned to reg R1
     and A2 and B1 are assigned to reg R2 is an example of cyclic
     dependencies.  */
  move_t *deps;
  /* First insn generated for the move.  */
  rtx_insn *insn;
};
188
/* Arrays of moves (indexed by BB index) which should be put at the
   start/end of the corresponding basic blocks.  */
static move_t *at_bb_start, *at_bb_end;

/* Max regno before renaming some pseudo-registers.  For example, the
   same pseudo-register can be renamed in a loop if its allocation is
   different outside the loop.  Regnos at or above this value are
   shared registers that were already changed.  */
static int max_regno_before_changing;
197
198/* Return new move of allocnos TO and FROM. */
199static move_t
200create_move (ira_allocno_t to, ira_allocno_t from)
201{
202 move_t move;
203
204 move = (move_t) ira_allocate (sizeof (struct move));
205 move->deps = NULL;
206 move->deps_num = 0;
207 move->to = to;
208 move->from = from;
209 move->next = NULL;
56067879 210 move->insn = NULL;
47dd2e78 211 move->visited_p = false;
212 return move;
213}
214
/* Free memory for MOVE and its dependency array.  */
static void
free_move (move_t move)
{
  if (move->deps != NULL)
    ira_free (move->deps);
  ira_free (move);
}
223
224/* Free memory for list of the moves given by its HEAD. */
225static void
226free_move_list (move_t head)
227{
228 move_t next;
48e1416a 229
47dd2e78 230 for (; head != NULL; head = next)
231 {
232 next = head->next;
233 free_move (head);
234 }
235}
236
851d9296 237/* Return TRUE if the move list LIST1 and LIST2 are equal (two
47dd2e78 238 moves are equal if they involve the same allocnos). */
239static bool
240eq_move_lists_p (move_t list1, move_t list2)
241{
242 for (; list1 != NULL && list2 != NULL;
243 list1 = list1->next, list2 = list2->next)
244 if (list1->from != list2->from || list1->to != list2->to)
245 return false;
246 return list1 == list2;
247}
248
/* Print move list LIST into file F, one move per " a<num>r<regno>->
   a<num>r<regno>" entry, ending with a newline.  */
static void
print_move_list (FILE *f, move_t list)
{
  for (; list != NULL; list = list->next)
    fprintf (f, " a%dr%d->a%dr%d",
	     ALLOCNO_NUM (list->from), ALLOCNO_REGNO (list->from),
	     ALLOCNO_NUM (list->to), ALLOCNO_REGNO (list->to));
  fprintf (f, "\n");
}
259
extern void ira_debug_move_list (move_t list);

/* Print move list LIST into stderr.  Intended to be called from a
   debugger.  */
void
ira_debug_move_list (move_t list)
{
  print_move_list (stderr, list);
}
268
/* This recursive function changes pseudo-registers in *LOC if it is
   necessary.  The function returns TRUE if a change was done.  */
static bool
change_regs (rtx *loc)
{
  int i, regno, result = false;
  const char *fmt;
  enum rtx_code code;
  rtx reg;

  if (*loc == NULL_RTX)
    return false;
  code = GET_CODE (*loc);
  if (code == REG)
    {
      regno = REGNO (*loc);
      /* Hard registers are never remapped.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	return false;
      if (regno >= max_regno_before_changing)
	/* It is a shared register which was changed already.  */
	return false;
      if (ira_curr_regno_allocno_map[regno] == NULL)
	return false;
      reg = allocno_emit_reg (ira_curr_regno_allocno_map[regno]);
      if (reg == *loc)
	return false;
      *loc = reg;
      return true;
    }

  /* Not a REG: recurse into every rtx operand ('e') and every rtx
     vector operand ('E') of this expression.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	result = change_regs (&XEXP (*loc, i)) || result;
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (*loc, i) - 1; j >= 0; j--)
	    result = change_regs (&XVECEXP (*loc, i, j)) || result;
	}
    }
  return result;
}
314
e149ca56 315static bool
316change_regs_in_insn (rtx_insn **insn_ptr)
317{
318 rtx rtx = *insn_ptr;
319 bool result = change_regs (&rtx);
320 *insn_ptr = as_a <rtx_insn *> (rtx);
321 return result;
322}
323
47dd2e78 324/* Attach MOVE to the edge E. The move is attached to the head of the
325 list if HEAD_P is TRUE. */
326static void
327add_to_edge_list (edge e, move_t move, bool head_p)
328{
329 move_t last;
330
331 if (head_p || e->aux == NULL)
332 {
333 move->next = (move_t) e->aux;
334 e->aux = move;
335 }
336 else
337 {
338 for (last = (move_t) e->aux; last->next != NULL; last = last->next)
339 ;
340 last->next = move;
341 move->next = NULL;
342 }
343}
344
/* Create and return new pseudo-register with the same attributes as
   ORIGINAL_REG.  */
rtx
ira_create_new_reg (rtx original_reg)
{
  rtx new_reg;

  new_reg = gen_reg_rtx (GET_MODE (original_reg));
  /* Copy attributes so the new pseudo behaves like the original for
     the rest of the compiler.  */
  ORIGINAL_REGNO (new_reg) = ORIGINAL_REGNO (original_reg);
  REG_USERVAR_P (new_reg) = REG_USERVAR_P (original_reg);
  REG_POINTER (new_reg) = REG_POINTER (original_reg);
  REG_ATTRS (new_reg) = REG_ATTRS (original_reg);
  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Creating newreg=%i from oldreg=%i\n",
	     REGNO (new_reg), REGNO (original_reg));
  /* Grow the reg-equivalence data to cover the new regno.  */
  ira_expand_reg_equiv ();
  return new_reg;
}
363
364/* Return TRUE if loop given by SUBNODE inside the loop given by
365 NODE. */
366static bool
367subloop_tree_node_p (ira_loop_tree_node_t subnode, ira_loop_tree_node_t node)
368{
369 for (; subnode != NULL; subnode = subnode->parent)
370 if (subnode == node)
371 return true;
372 return false;
373}
374
/* Set up member `reg' to REG for allocnos which have the same regno
   as ALLOCNO and which are inside the loop corresponding to ALLOCNO.  */
static void
set_allocno_reg (ira_allocno_t allocno, rtx reg)
{
  int regno;
  ira_allocno_t a;
  ira_loop_tree_node_t node;

  node = ALLOCNO_LOOP_TREE_NODE (allocno);
  /* All allocnos for the same regno in subloops of ALLOCNO's loop get
     the new register.  */
  for (a = ira_regno_allocno_map[ALLOCNO_REGNO (allocno)];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    if (subloop_tree_node_p (ALLOCNO_LOOP_TREE_NODE (a), node))
      ALLOCNO_EMIT_DATA (a)->reg = reg;
  /* So do the cap chain of ALLOCNO in enclosing loops.  */
  for (a = ALLOCNO_CAP (allocno); a != NULL; a = ALLOCNO_CAP (a))
    ALLOCNO_EMIT_DATA (a)->reg = reg;
  regno = ALLOCNO_REGNO (allocno);
  /* Walk toward the loop-tree root (via caps when present, otherwise
     via the parents' regno maps), flagging ancestors as having a
     renamed child; stop at the first already-flagged ancestor.  */
  for (a = allocno;;)
    {
      if (a == NULL || (a = ALLOCNO_CAP (a)) == NULL)
	{
	  node = node->parent;
	  if (node == NULL)
	    break;
	  a = node->regno_allocno_map[regno];
	}
      if (a == NULL)
	continue;
      if (ALLOCNO_EMIT_DATA (a)->child_renamed_p)
	break;
      ALLOCNO_EMIT_DATA (a)->child_renamed_p = true;
    }
}
409
/* Return true if there is an entry to given loop not from its parent
   (or grandparent) block.  For example, it is possible for two
   adjacent loops inside another loop.  */
static bool
entered_from_non_parent_p (ira_loop_tree_node_t loop_node)
{
  ira_loop_tree_node_t bb_node, src_loop_node, parent;
  edge e;
  edge_iterator ei;

  /* Examine the predecessors of every basic block directly inside
     LOOP_NODE.  */
  for (bb_node = loop_node->children;
       bb_node != NULL;
       bb_node = bb_node->next)
    if (bb_node->bb != NULL)
      {
	FOR_EACH_EDGE (e, ei, bb_node->bb->preds)
	  if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
	      && (src_loop_node = IRA_BB_NODE (e->src)->parent) != loop_node)
	    {
	      /* Is the edge source inside LOOP_NODE itself?  */
	      for (parent = src_loop_node->parent;
		   parent != NULL;
		   parent = parent->parent)
		if (parent == loop_node)
		  break;
	      if (parent != NULL)
		/* That is an exit from a nested loop -- skip it.  */
		continue;
	      /* Is the edge source an ancestor of LOOP_NODE?  If not,
		 the loop is entered from a non-parent block.  */
	      for (parent = loop_node->parent;
		   parent != NULL;
		   parent = parent->parent)
		if (src_loop_node == parent)
		  break;
	      if (parent == NULL)
		return true;
	    }
      }
  return false;
}
448
/* Set up ENTERED_FROM_NON_PARENT_P for each loop region.  */
static void
setup_entered_from_non_parent_p (void)
{
  unsigned int i;
  loop_p loop;

  ira_assert (current_loops != NULL);
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    /* Only loops that form IRA regions have a regno/allocno map.  */
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      ira_loop_nodes[i].entered_from_non_parent_p
	= entered_from_non_parent_p (&ira_loop_nodes[i]);
}
462
/* Return TRUE if move of SRC_ALLOCNO (assigned to hard register) to
   DEST_ALLOCNO (assigned to memory) can be removed because it does
   not change value of the destination.  One possible reason for this
   is the situation when SRC_ALLOCNO is not modified in the
   corresponding loop.  */
static bool
store_can_be_removed_p (ira_allocno_t src_allocno, ira_allocno_t dest_allocno)
{
  int regno, orig_regno;
  ira_allocno_t a;
  ira_loop_tree_node_t node;

  ira_assert (ALLOCNO_CAP_MEMBER (src_allocno) == NULL
	      && ALLOCNO_CAP_MEMBER (dest_allocno) == NULL);
  orig_regno = ALLOCNO_REGNO (src_allocno);
  regno = REGNO (allocno_emit_reg (dest_allocno));
  /* Walk up from the source loop toward the loop holding the
     destination allocno, checking for modifications on the way.  */
  for (node = ALLOCNO_LOOP_TREE_NODE (src_allocno);
       node != NULL;
       node = node->parent)
    {
      a = node->regno_allocno_map[orig_regno];
      ira_assert (a != NULL);
      if (REGNO (allocno_emit_reg (a)) == (unsigned) regno)
	/* We achieved the destination and everything is ok.  */
	return true;
      else if (bitmap_bit_p (node->modified_regnos, orig_regno))
	return false;
      else if (node->entered_from_non_parent_p)
	/* If there is a path from a destination loop block to the
	   source loop header containing basic blocks of non-parents
	   (grandparents) of the source loop, we should have checked
	   modifications of the pseudo on this path too to decide
	   about possibility to remove the store.  It could be done by
	   solving a data-flow problem.  Unfortunately such global
	   solution would complicate IR flattening.  Therefore we just
	   prohibit removal of the store in such complicated case.  */
	return false;
    }
  /* It is actually a loop entry -- do not remove the store.  */
  return false;
}
504
/* Generate and attach moves to the edge E.  This looks at the final
   regnos of allocnos living on the edge with the same original regno
   to figure out when moves should be generated.  */
static void
generate_edge_moves (edge e)
{
  ira_loop_tree_node_t src_loop_node, dest_loop_node;
  unsigned int regno;
  bitmap_iterator bi;
  ira_allocno_t src_allocno, dest_allocno, *src_map, *dest_map;
  move_t move;
  bitmap regs_live_in_dest, regs_live_out_src;

  src_loop_node = IRA_BB_NODE (e->src)->parent;
  dest_loop_node = IRA_BB_NODE (e->dest)->parent;
  e->aux = NULL;
  /* Moves are only needed on region borders.  */
  if (src_loop_node == dest_loop_node)
    return;
  src_map = src_loop_node->regno_allocno_map;
  dest_map = dest_loop_node->regno_allocno_map;
  regs_live_in_dest = df_get_live_in (e->dest);
  regs_live_out_src = df_get_live_out (e->src);
  /* Consider only pseudos live across the edge.  */
  EXECUTE_IF_SET_IN_REG_SET (regs_live_in_dest,
			     FIRST_PSEUDO_REGISTER, regno, bi)
    if (bitmap_bit_p (regs_live_out_src, regno))
      {
	src_allocno = src_map[regno];
	dest_allocno = dest_map[regno];
	if (REGNO (allocno_emit_reg (src_allocno))
	    == REGNO (allocno_emit_reg (dest_allocno)))
	  continue;
	/* Remove unnecessary stores at the region exit.  We should do
	   this for readonly memory for sure and this is guaranteed by
	   that we never generate moves on region borders (see
	   checking in function change_loop).  */
	if (ALLOCNO_HARD_REGNO (dest_allocno) < 0
	    && ALLOCNO_HARD_REGNO (src_allocno) >= 0
	    && store_can_be_removed_p (src_allocno, dest_allocno))
	  {
	    ALLOCNO_EMIT_DATA (src_allocno)->mem_optimized_dest = dest_allocno;
	    ALLOCNO_EMIT_DATA (dest_allocno)->mem_optimized_dest_p = true;
	    if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	      fprintf (ira_dump_file, " Remove r%d:a%d->a%d(mem)\n",
		       regno, ALLOCNO_NUM (src_allocno),
		       ALLOCNO_NUM (dest_allocno));
	    continue;
	  }
	move = create_move (dest_allocno, src_allocno);
	add_to_edge_list (e, move, true);
      }
}
556
/* Bitmap of allocnos local for the current loop.  */
static bitmap local_allocno_bitmap;

/* This bitmap is used to find that we need to generate and to use a
   new pseudo-register when processing allocnos with the same original
   regno.  */
static bitmap used_regno_bitmap;

/* This bitmap contains regnos of allocnos which were renamed locally
   because the allocnos correspond to disjoint live ranges in loops
   with a common parent.  */
static bitmap renamed_regno_bitmap;
569
/* Change (if necessary) pseudo-registers inside loop given by loop
   tree node NODE.  */
static void
change_loop (ira_loop_tree_node_t node)
{
  bitmap_iterator bi;
  unsigned int i;
  int regno;
  bool used_p;
  ira_allocno_t allocno, parent_allocno, *map;
  rtx_insn *insn;
  rtx original_reg;
  enum reg_class aclass, pclass;
  ira_loop_tree_node_t parent;

  if (node != ira_loop_tree_root)
    {
      ira_assert (current_loops != NULL);

      if (node->bb != NULL)
	{
	  /* A leaf (basic block) node: just rewrite the registers in
	     the block's insns and rescan the changed ones.  */
	  FOR_BB_INSNS (node->bb, insn)
	    if (INSN_P (insn) && change_regs_in_insn (&insn))
	      {
		df_insn_rescan (insn);
		df_notes_rescan (insn);
	      }
	  return;
	}

      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	fprintf (ira_dump_file,
		 " Changing RTL for loop %d (header bb%d)\n",
		 node->loop_num, node->loop->header->index);

      /* A loop node: decide for each border allocno whether it needs
	 a new pseudo different from its parent's.  */
      parent = ira_curr_loop_tree_node->parent;
      map = parent->regno_allocno_map;
      EXECUTE_IF_SET_IN_REG_SET (ira_curr_loop_tree_node->border_allocnos,
				 0, i, bi)
	{
	  allocno = ira_allocnos[i];
	  regno = ALLOCNO_REGNO (allocno);
	  aclass = ALLOCNO_CLASS (allocno);
	  pclass = ira_pressure_class_translate[aclass];
	  parent_allocno = map[regno];
	  ira_assert (regno < ira_reg_equiv_len);
	  /* We generate the same hard register move because the
	     reload pass can put an allocno into memory in this case
	     we will have live range splitting.  If it does not happen
	     such the same hard register moves will be removed.  The
	     worst case when the both allocnos are put into memory by
	     the reload is very rare.  */
	  if (parent_allocno != NULL
	      && (ALLOCNO_HARD_REGNO (allocno)
		  == ALLOCNO_HARD_REGNO (parent_allocno))
	      && (ALLOCNO_HARD_REGNO (allocno) < 0
		  || (parent->reg_pressure[pclass] + 1
		      <= ira_class_hard_regs_num[pclass])
		  || TEST_HARD_REG_BIT (ira_prohibited_mode_move_regs
					[ALLOCNO_MODE (allocno)],
					ALLOCNO_HARD_REGNO (allocno))
		  /* don't create copies because reload can spill an
		     allocno set by copy although the allocno will not
		     get memory slot.  */
		  || ira_equiv_no_lvalue_p (regno)
		  || (pic_offset_table_rtx != NULL
		      && (ALLOCNO_REGNO (allocno)
			  == (int) REGNO (pic_offset_table_rtx)))))
	    continue;
	  original_reg = allocno_emit_reg (allocno);
	  if (parent_allocno == NULL
	      || (REGNO (allocno_emit_reg (parent_allocno))
		  == REGNO (original_reg)))
	    {
	      if (internal_flag_ira_verbose > 3 && ira_dump_file)
		fprintf (ira_dump_file, " %i vs parent %i:",
			 ALLOCNO_HARD_REGNO (allocno),
			 ALLOCNO_HARD_REGNO (parent_allocno));
	      set_allocno_reg (allocno, ira_create_new_reg (original_reg));
	    }
	}
    }
  /* Rename locals: Local allocnos with same regno in different loops
     might get the different hard register.  So we need to change
     ALLOCNO_REG.  */
  bitmap_and_compl (local_allocno_bitmap,
		    ira_curr_loop_tree_node->all_allocnos,
		    ira_curr_loop_tree_node->border_allocnos);
  EXECUTE_IF_SET_IN_REG_SET (local_allocno_bitmap, 0, i, bi)
    {
      allocno = ira_allocnos[i];
      regno = ALLOCNO_REGNO (allocno);
      if (ALLOCNO_CAP_MEMBER (allocno) != NULL)
	continue;
      /* The first local allocno for a regno keeps the original reg;
	 subsequent ones (used_p) get fresh pseudos.  */
      used_p = !bitmap_set_bit (used_regno_bitmap, regno);
      ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
      if (! used_p)
	continue;
      bitmap_set_bit (renamed_regno_bitmap, regno);
      set_allocno_reg (allocno, ira_create_new_reg (allocno_emit_reg (allocno)));
    }
}
672
673/* Process to set up flag somewhere_renamed_p. */
674static void
675set_allocno_somewhere_renamed_p (void)
676{
677 unsigned int regno;
678 ira_allocno_t allocno;
679 ira_allocno_iterator ai;
680
681 FOR_EACH_ALLOCNO (allocno, ai)
682 {
683 regno = ALLOCNO_REGNO (allocno);
684 if (bitmap_bit_p (renamed_regno_bitmap, regno)
66d9a7b9 685 && REGNO (allocno_emit_reg (allocno)) == regno)
686 ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
47dd2e78 687 }
688}
689
690/* Return TRUE if move lists on all edges given in vector VEC are
691 equal. */
692static bool
f1f41a6c 693eq_edge_move_lists_p (vec<edge, va_gc> *vec)
47dd2e78 694{
695 move_t list;
696 int i;
697
698 list = (move_t) EDGE_I (vec, 0)->aux;
699 for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
700 if (! eq_move_lists_p (list, (move_t) EDGE_I (vec, i)->aux))
701 return false;
702 return true;
703}
704
/* Look at all entry edges (if START_P) or exit edges of basic block
   BB and put move lists at the BB start or end if it is possible.  In
   other words, this decreases code duplication of allocno moves.  */
static void
unify_moves (basic_block bb, bool start_p)
{
  int i;
  edge e;
  move_t list;
  vec<edge, va_gc> *vec;

  vec = (start_p ? bb->preds : bb->succs);
  /* Hoisting is only possible when every edge carries an identical
     move list.  */
  if (EDGE_COUNT (vec) == 0 || ! eq_edge_move_lists_p (vec))
    return;
  e = EDGE_I (vec, 0);
  list = (move_t) e->aux;
  /* Cannot append after a control flow insn at the end of BB.  */
  if (! start_p && control_flow_insn_p (BB_END (bb)))
    return;
  e->aux = NULL;
  /* Keep the first edge's list and free the duplicates.  */
  for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
    {
      e = EDGE_I (vec, i);
      free_move_list ((move_t) e->aux);
      e->aux = NULL;
    }
  if (start_p)
    at_bb_start[bb->index] = list;
  else
    at_bb_end[bb->index] = list;
}
735
/* Last move (in move sequence being processed) setting up the
   corresponding hard register.  */
static move_t hard_regno_last_set[FIRST_PSEUDO_REGISTER];

/* If the element value is equal to CURR_TICK then the corresponding
   element in `hard_regno_last_set' is defined and correct.  */
static int hard_regno_last_set_check[FIRST_PSEUDO_REGISTER];

/* Last move (in move sequence being processed) setting up the
   corresponding allocno.  */
static move_t *allocno_last_set;

/* If the element value is equal to CURR_TICK then the corresponding
   element in `allocno_last_set' is defined and correct.  */
static int *allocno_last_set_check;

/* Definition of vector of moves.  */

/* This vec contains moves sorted topologically (depth-first) on their
   dependency graph.  */
static vec<move_t> move_vec;

/* The variable value is used to check correctness of values of
   elements of arrays `hard_regno_last_set' and
   `allocno_last_set_check'.  */
static int curr_tick;
762
763/* This recursive function traverses dependencies of MOVE and produces
764 topological sorting (in depth-first order). */
765static void
766traverse_moves (move_t move)
767{
768 int i;
769
770 if (move->visited_p)
771 return;
772 move->visited_p = true;
773 for (i = move->deps_num - 1; i >= 0; i--)
774 traverse_moves (move->deps[i]);
f1f41a6c 775 move_vec.safe_push (move);
47dd2e78 776}
777
/* Remove unnecessary moves in the LIST, makes topological sorting,
   and removes cycles on hard reg dependencies by introducing new
   allocnos assigned to memory and additional moves.  It returns the
   result move list.  */
static move_t
modify_move_list (move_t list)
{
  int i, n, nregs, hard_regno;
  ira_allocno_t to, from;
  move_t move, new_move, set_move, first, last;

  if (list == NULL)
    return NULL;
  /* Create move deps.  First record, for each hard register, the
     last move in LIST that sets it up.  */
  curr_tick++;
  for (move = list; move != NULL; move = move->next)
    {
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
	continue;
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (to)];
      for (i = 0; i < nregs; i++)
	{
	  hard_regno_last_set[hard_regno + i] = move;
	  hard_regno_last_set_check[hard_regno + i] = curr_tick;
	}
    }
  /* A move depends on every move that overwrites a hard register its
     source lives in (unless that move targets the same regno).  */
  for (move = list; move != NULL; move = move->next)
    {
      from = move->from;
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
	{
	  nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (from)];
	  /* First pass counts the deps; second pass fills the array.  */
	  for (n = i = 0; i < nregs; i++)
	    if (hard_regno_last_set_check[hard_regno + i] == curr_tick
		&& (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
		    != ALLOCNO_REGNO (from)))
	      n++;
	  move->deps = (move_t *) ira_allocate (n * sizeof (move_t));
	  for (n = i = 0; i < nregs; i++)
	    if (hard_regno_last_set_check[hard_regno + i] == curr_tick
		&& (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
		    != ALLOCNO_REGNO (from)))
	      move->deps[n++] = hard_regno_last_set[hard_regno + i];
	  move->deps_num = n;
	}
    }
  /* Topological sorting:  */
  move_vec.truncate (0);
  for (move = list; move != NULL; move = move->next)
    traverse_moves (move);
  /* Relink the list in reverse push order.  */
  last = NULL;
  for (i = (int) move_vec.length () - 1; i >= 0; i--)
    {
      move = move_vec[i];
      move->next = NULL;
      if (last != NULL)
	last->next = move;
      last = move;
    }
  first = move_vec.last ();
  /* Removing cycles: when a move's source hard register is clobbered
     by an earlier move in the sequence, store the clobbered value in
     a fresh memory-assigned allocno first.  */
  curr_tick++;
  move_vec.truncate (0);
  for (move = first; move != NULL; move = move->next)
    {
      from = move->from;
      to = move->to;
      if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
	{
	  nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (from)];
	  for (i = 0; i < nregs; i++)
	    if (hard_regno_last_set_check[hard_regno + i] == curr_tick
		&& ALLOCNO_HARD_REGNO
		   (hard_regno_last_set[hard_regno + i]->to) >= 0)
	      {
		int n, j;
		ira_allocno_t new_allocno;

		set_move = hard_regno_last_set[hard_regno + i];
		/* It does not matter what loop_tree_node (of TO or
		   FROM) to use for the new allocno because of
		   subsequent IRA internal representation
		   flattening.  */
		new_allocno
		  = create_new_allocno (ALLOCNO_REGNO (set_move->to),
					ALLOCNO_LOOP_TREE_NODE (set_move->to));
		ALLOCNO_MODE (new_allocno) = ALLOCNO_MODE (set_move->to);
		ira_set_allocno_class (new_allocno,
				       ALLOCNO_CLASS (set_move->to));
		ira_create_allocno_objects (new_allocno);
		ALLOCNO_ASSIGNED_P (new_allocno) = true;
		ALLOCNO_HARD_REGNO (new_allocno) = -1;
		ALLOCNO_EMIT_DATA (new_allocno)->reg
		  = ira_create_new_reg (allocno_emit_reg (set_move->to));

		/* Make it possibly conflicting with all earlier
		   created allocnos.  Cases where temporary allocnos
		   created to remove the cycles are quite rare.  */
		n = ALLOCNO_NUM_OBJECTS (new_allocno);
		gcc_assert (n == ALLOCNO_NUM_OBJECTS (set_move->to));
		for (j = 0; j < n; j++)
		  {
		    ira_object_t new_obj = ALLOCNO_OBJECT (new_allocno, j);

		    OBJECT_MIN (new_obj) = 0;
		    OBJECT_MAX (new_obj) = ira_objects_num - 1;
		  }

		/* Redirect SET_MOVE into the temporary and queue the
		   move from the temporary to the old destination.  */
		new_move = create_move (set_move->to, new_allocno);
		set_move->to = new_allocno;
		move_vec.safe_push (new_move);
		ira_move_loops_num++;
		if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		  fprintf (ira_dump_file,
			   " Creating temporary allocno a%dr%d\n",
			   ALLOCNO_NUM (new_allocno),
			   REGNO (allocno_emit_reg (new_allocno)));
	      }
	}
      if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
	continue;
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (to)];
      for (i = 0; i < nregs; i++)
	{
	  hard_regno_last_set[hard_regno + i] = move;
	  hard_regno_last_set_check[hard_regno + i] = curr_tick;
	}
    }
  /* Append the newly created moves at the end of the sequence.  */
  for (i = (int) move_vec.length () - 1; i >= 0; i--)
    {
      move = move_vec[i];
      move->next = NULL;
      last->next = move;
      last = move;
    }
  return first;
}
917
/* Generate RTX move insns from the move list LIST.  This updates
   allocation cost using move execution frequency FREQ.  Return the
   emitted insn sequence implementing all moves of LIST.  */
static rtx_insn *
emit_move_list (move_t list, int freq)
{
  rtx to, from, dest;
  int to_regno, from_regno, cost, regno;
  rtx_insn *result, *insn;
  rtx set;
  machine_mode mode;
  enum reg_class aclass;

  grow_reg_equivs ();
  start_sequence ();
  for (; list != NULL; list = list->next)
    {
      /* Emit each move into its own nested sequence so the insns can
	 be recorded in LIST->insn before being appended to the outer
	 sequence below.  */
      start_sequence ();
      to = allocno_emit_reg (list->to);
      to_regno = REGNO (to);
      from = allocno_emit_reg (list->from);
      from_regno = REGNO (from);
      emit_move_insn (to, from);
      list->insn = get_insns ();
      end_sequence ();
      for (insn = list->insn; insn != NULL_RTX; insn = NEXT_INSN (insn))
	{
	  /* The reload needs to have set up insn codes.  If the
	     reload sets up insn codes by itself, it may fail because
	     insns will have hard registers instead of pseudos and
	     there may be no machine insn with given hard
	     registers.  */
	  recog_memoized (insn);
	  /* Add insn to equiv init insn list if it is necessary.
	     Otherwise reload will not remove this insn if it decides
	     to use the equivalence.  */
	  if ((set = single_set (insn)) != NULL_RTX)
	    {
	      /* Look through a SUBREG to find the pseudo actually
		 being set.  */
	      dest = SET_DEST (set);
	      if (GET_CODE (dest) == SUBREG)
		dest = SUBREG_REG (dest);
	      ira_assert (REG_P (dest));
	      regno = REGNO (dest);
	      if (regno >= ira_reg_equiv_len
		  || (ira_reg_equiv[regno].invariant == NULL_RTX
		      && ira_reg_equiv[regno].constant == NULL_RTX))
		continue; /* regno has no equivalence.  */
	      ira_assert ((int) reg_equivs->length () > regno);
	      reg_equiv_init (regno)
		= gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init (regno));
	    }
	}
      if (ira_use_lra_p)
	ira_update_equiv_info_by_shuffle_insn (to_regno, from_regno, list->insn);
      emit_insn (list->insn);
      /* Account the cost of the move, classified by whether each side
	 got a hard register or memory.  */
      mode = ALLOCNO_MODE (list->to);
      aclass = ALLOCNO_CLASS (list->to);
      cost = 0;
      if (ALLOCNO_HARD_REGNO (list->to) < 0)
	{
	  /* Destination is in memory: a store if the source is in a
	     hard register; mem-to-mem moves are not costed here.  */
	  if (ALLOCNO_HARD_REGNO (list->from) >= 0)
	    {
	      cost = ira_memory_move_cost[mode][aclass][0] * freq;
	      ira_store_cost += cost;
	    }
	}
      else if (ALLOCNO_HARD_REGNO (list->from) < 0)
	{
	  /* Source in memory, destination in a hard register: a load.
	     NOTE(review): this inner test is always true here (the
	     enclosing else already implies it), and the cost uses
	     index [0] while update_costs uses [1] for reads — confirm
	     the index against the ira_memory_move_cost convention.  */
	  if (ALLOCNO_HARD_REGNO (list->to) >= 0)
	    {
	      cost = ira_memory_move_cost[mode][aclass][0] * freq;
	      ira_load_cost += cost;
	    }
	}
      else
	{
	  /* Both sides got hard registers: a register shuffle.  */
	  ira_init_register_move_cost_if_necessary (mode);
	  cost = ira_register_move_cost[mode][aclass][aclass] * freq;
	  ira_shuffle_cost += cost;
	}
      ira_overall_cost += cost;
    }
  result = get_insns ();
  end_sequence ();
  return result;
}
1003
/* Generate RTX move insns from move lists attached to basic blocks
   and edges.  Insns for a block start go after the block's label and
   basic-block note; insns for a block end go after its last insn;
   insns for an edge are queued with insert_insn_on_edge and committed
   later.  */
static void
emit_moves (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;
  rtx_insn *insns, *tmp;

  FOR_EACH_BB_FN (bb, cfun)
    {
      if (at_bb_start[bb->index] != NULL)
	{
	  at_bb_start[bb->index] = modify_move_list (at_bb_start[bb->index]);
	  insns = emit_move_list (at_bb_start[bb->index],
				  REG_FREQ_FROM_BB (bb));
	  /* Skip the block's label and basic-block note, if present,
	     to find the first real insn position.  */
	  tmp = BB_HEAD (bb);
	  if (LABEL_P (tmp))
	    tmp = NEXT_INSN (tmp);
	  if (NOTE_INSN_BASIC_BLOCK_P (tmp))
	    tmp = NEXT_INSN (tmp);
	  if (tmp == BB_HEAD (bb))
	    /* Nothing was skipped: insert before the very first insn.  */
	    emit_insn_before (insns, tmp);
	  else if (tmp != NULL_RTX)
	    /* Insert right after the skipped label/note.  */
	    emit_insn_after (insns, PREV_INSN (tmp));
	  else
	    /* The block ends the insn chain: append at the end.  */
	    emit_insn_after (insns, get_last_insn ());
	}

      if (at_bb_end[bb->index] != NULL)
	{
	  at_bb_end[bb->index] = modify_move_list (at_bb_end[bb->index]);
	  insns = emit_move_list (at_bb_end[bb->index], REG_FREQ_FROM_BB (bb));
	  /* Moves at the block end only exist when the block does not
	     end in a control flow insn (see the assert).  */
	  ira_assert (! control_flow_insn_p (BB_END (bb)));
	  emit_insn_after (insns, BB_END (bb));
	}

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (e->aux == NULL)
	    continue;
	  /* Splitting a critical abnormal edge is not possible, so
	     such edges must not carry moves.  */
	  ira_assert ((e->flags & EDGE_ABNORMAL) == 0
		      || ! EDGE_CRITICAL_P (e));
	  e->aux = modify_move_list ((move_t) e->aux);
	  insert_insn_on_edge
	    (emit_move_list ((move_t) e->aux,
			     REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e))),
	     e);
	  /* Committing insns on a non-fallthrough edge will create a
	     new block and jump; count it.  */
	  if (e->src->next_bb != e->dest)
	    ira_additional_jumps_num++;
	}
    }
}
1058
1059/* Update costs of A and corresponding allocnos on upper levels on the
1060 loop tree from reading (if READ_P) or writing A on an execution
1061 path with FREQ. */
1062static void
1063update_costs (ira_allocno_t a, bool read_p, int freq)
1064{
1065 ira_loop_tree_node_t parent;
1066
1067 for (;;)
1068 {
1069 ALLOCNO_NREFS (a)++;
1070 ALLOCNO_FREQ (a) += freq;
1071 ALLOCNO_MEMORY_COST (a)
66d9a7b9 1072 += (ira_memory_move_cost[ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)]
47dd2e78 1073 [read_p ? 1 : 0] * freq);
1074 if (ALLOCNO_CAP (a) != NULL)
1075 a = ALLOCNO_CAP (a);
1076 else if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL
1077 || (a = parent->regno_allocno_map[ALLOCNO_REGNO (a)]) == NULL)
1078 break;
1079 }
1080}
1081
/* Process moves from LIST with execution FREQ to add ranges, copies,
   and modify costs for allocnos involved in the moves.  All regnos
   living through the list are in LIVE_THROUGH, and the loop tree node
   used to find corresponding allocnos is NODE.  */
static void
add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node,
				     bitmap live_through, int freq)
{
  int start, n;
  unsigned int regno;
  move_t move;
  ira_allocno_t a;
  ira_copy_t cp;
  live_range_t r;
  bitmap_iterator bi;
  HARD_REG_SET hard_regs_live;

  if (list == NULL)
    return;
  /* N = number of pseudos living through the whole list; used to size
     freshly allocated conflict arrays below.  */
  n = 0;
  EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
    n++;
  REG_SET_TO_HARD_REG_SET (hard_regs_live, live_through);
  /* This is a trick to guarantee that new ranges are not merged with
     the old ones.  */
  ira_max_point++;
  start = ira_max_point;
  for (move = list; move != NULL; move = move->next)
    {
      ira_allocno_t from = move->from;
      ira_allocno_t to = move->to;
      int nr, i;

      /* The regnos involved in a move are handled through their
	 allocnos, not as live-through regnos.  */
      bitmap_clear_bit (live_through, ALLOCNO_REGNO (from));
      bitmap_clear_bit (live_through, ALLOCNO_REGNO (to));

      nr = ALLOCNO_NUM_OBJECTS (to);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
	  if (OBJECT_CONFLICT_ARRAY (to_obj) == NULL)
	    {
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file, "    Allocate conflicts for a%dr%d\n",
			 ALLOCNO_NUM (to), REGNO (allocno_emit_reg (to)));
	      ira_allocate_object_conflicts (to_obj, n);
	    }
	}
      /* Record conflicts with all hard registers live through the
	 list.  */
      ior_hard_reg_conflicts (from, &hard_regs_live);
      ior_hard_reg_conflicts (to, &hard_regs_live);

      /* The move reads FROM and writes TO.  */
      update_costs (from, true, freq);
      update_costs (to, false, freq);
      cp = ira_add_allocno_copy (from, to, freq, false, move->insn, NULL);
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	fprintf (ira_dump_file, "    Adding cp%d:a%dr%d-a%dr%d\n",
		 cp->num, ALLOCNO_NUM (cp->first),
		 REGNO (allocno_emit_reg (cp->first)),
		 ALLOCNO_NUM (cp->second),
		 REGNO (allocno_emit_reg (cp->second)));

      /* Extend (or create) the live range of FROM up to the current
	 point: it dies at this move.  */
      nr = ALLOCNO_NUM_OBJECTS (from);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
	  r = OBJECT_LIVE_RANGES (from_obj);
	  if (r == NULL || r->finish >= 0)
	    {
	      ira_add_live_range_to_object (from_obj, start, ira_max_point);
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file,
			 "    Adding range [%d..%d] to allocno a%dr%d\n",
			 start, ira_max_point, ALLOCNO_NUM (from),
			 REGNO (allocno_emit_reg (from)));
	    }
	  else
	    {
	      /* The last range is still open (finish < 0): close it
		 here instead of adding a new one.  */
	      r->finish = ira_max_point;
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file,
			 "    Adding range [%d..%d] to allocno a%dr%d\n",
			 r->start, ira_max_point, ALLOCNO_NUM (from),
			 REGNO (allocno_emit_reg (from)));
	    }
	}
      ira_max_point++;
      /* TO becomes live at the next point; the range is left open
	 (finish == -1) and closed in the second pass below.  */
      nr = ALLOCNO_NUM_OBJECTS (to);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
	  ira_add_live_range_to_object (to_obj, ira_max_point, -1);
	}
      ira_max_point++;
    }
  /* Second pass: close any destination ranges still left open by the
     loop above.  */
  for (move = list; move != NULL; move = move->next)
    {
      int nr, i;
      nr = ALLOCNO_NUM_OBJECTS (move->to);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t to_obj = ALLOCNO_OBJECT (move->to, i);
	  r = OBJECT_LIVE_RANGES (to_obj);
	  if (r->finish < 0)
	    {
	      r->finish = ira_max_point - 1;
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file,
			 "    Adding range [%d..%d] to allocno a%dr%d\n",
			 r->start, r->finish, ALLOCNO_NUM (move->to),
			 REGNO (allocno_emit_reg (move->to)));
	    }
	}
    }
  /* Finally give every regno that lives through the whole list a
     range covering it.  */
  EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
    {
      ira_allocno_t to;
      int nr, i;

      a = node->regno_allocno_map[regno];
      /* If the allocno's memory was optimized into another allocno,
	 use that destination instead.  */
      if ((to = ALLOCNO_EMIT_DATA (a)->mem_optimized_dest) != NULL)
	a = to;
      nr = ALLOCNO_NUM_OBJECTS (a);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  ira_add_live_range_to_object (obj, start, ira_max_point - 1);
	}
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	fprintf
	  (ira_dump_file,
	   "    Adding range [%d..%d] to live through %s allocno a%dr%d\n",
	   start, ira_max_point - 1,
	   to != NULL ? "upper level" : "",
	   ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
    }
}
1218
1219/* Process all move list to add ranges, conflicts, copies, and modify
1220 costs for allocnos involved in the moves. */
1221static void
1222add_ranges_and_copies (void)
1223{
1224 basic_block bb;
1225 edge_iterator ei;
1226 edge e;
1227 ira_loop_tree_node_t node;
1228 bitmap live_through;
1229
1230 live_through = ira_allocate_bitmap ();
fc00614f 1231 FOR_EACH_BB_FN (bb, cfun)
47dd2e78 1232 {
1233 /* It does not matter what loop_tree_node (of source or
1234 destination block) to use for searching allocnos by their
1235 regnos because of subsequent IR flattening. */
1236 node = IRA_BB_NODE (bb)->parent;
0841d295 1237 bitmap_copy (live_through, df_get_live_in (bb));
47dd2e78 1238 add_range_and_copies_from_move_list
1239 (at_bb_start[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
0841d295 1240 bitmap_copy (live_through, df_get_live_out (bb));
47dd2e78 1241 add_range_and_copies_from_move_list
1242 (at_bb_end[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
1243 FOR_EACH_EDGE (e, ei, bb->succs)
1244 {
0841d295 1245 bitmap_and (live_through,
1246 df_get_live_in (e->dest), df_get_live_out (bb));
47dd2e78 1247 add_range_and_copies_from_move_list
1248 ((move_t) e->aux, node, live_through,
1249 REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e)));
1250 }
1251 }
1252 ira_free_bitmap (live_through);
1253}
1254
/* The entry function changes code and generates shuffling allocnos on
   region borders for the regional (LOOPS_P is TRUE in this case)
   register allocation.  For the one-region case it only sets up the
   emit registers of allocnos and returns.  */
void
ira_emit (bool loops_p)
{
  basic_block bb;
  rtx_insn *insn;
  edge_iterator ei;
  edge e;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  size_t sz;

  FOR_EACH_ALLOCNO (a, ai)
    ALLOCNO_EMIT_DATA (a)->reg = regno_reg_rtx[ALLOCNO_REGNO (a)];
  if (! loops_p)
    return;
  /* Per-basic-block move lists, zero-initialized.  */
  sz = sizeof (move_t) * last_basic_block_for_fn (cfun);
  at_bb_start = (move_t *) ira_allocate (sz);
  memset (at_bb_start, 0, sz);
  at_bb_end = (move_t *) ira_allocate (sz);
  memset (at_bb_end, 0, sz);
  local_allocno_bitmap = ira_allocate_bitmap ();
  used_regno_bitmap = ira_allocate_bitmap ();
  renamed_regno_bitmap = ira_allocate_bitmap ();
  max_regno_before_changing = max_reg_num ();
  /* Create new pseudos for allocnos living inside loops.  */
  ira_traverse_loop_tree (true, ira_loop_tree_root, change_loop, NULL);
  set_allocno_somewhere_renamed_p ();
  ira_free_bitmap (used_regno_bitmap);
  ira_free_bitmap (renamed_regno_bitmap);
  ira_free_bitmap (local_allocno_bitmap);
  setup_entered_from_non_parent_p ();
  /* Generate the raw move lists on region-border edges.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      at_bb_start[bb->index] = NULL;
      at_bb_end[bb->index] = NULL;
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
	  generate_edge_moves (e);
    }
  allocno_last_set
    = (move_t *) ira_allocate (sizeof (move_t) * max_reg_num ());
  allocno_last_set_check
    = (int *) ira_allocate (sizeof (int) * max_reg_num ());
  memset (allocno_last_set_check, 0, sizeof (int) * max_reg_num ());
  memset (hard_regno_last_set_check, 0, sizeof (hard_regno_last_set_check));
  curr_tick = 0;
  /* Hoist edge moves into blocks where possible: first to block
     starts, then to block ends.  */
  FOR_EACH_BB_FN (bb, cfun)
    unify_moves (bb, true);
  FOR_EACH_BB_FN (bb, cfun)
    unify_moves (bb, false);
  move_vec.create (ira_allocnos_num);
  emit_moves ();
  add_ranges_and_copies ();
  /* Clean up: */
  FOR_EACH_BB_FN (bb, cfun)
    {
      free_move_list (at_bb_start[bb->index]);
      free_move_list (at_bb_end[bb->index]);
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  free_move_list ((move_t) e->aux);
	  e->aux = NULL;
	}
    }
  move_vec.release ();
  ira_free (allocno_last_set_check);
  ira_free (allocno_last_set);
  /* Materialize the insns queued on edges by emit_moves.  */
  commit_edge_insertions ();
  /* Fix insn codes.  It is necessary to do it before reload because
     reload assumes initial insn codes defined.  The insn codes can be
     invalidated by CFG infrastructure for example in jump
     redirection.  */
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS_REVERSE (bb, insn)
      if (INSN_P (insn))
	recog_memoized (insn);
  ira_free (at_bb_end);
  ira_free (at_bb_start);
}