/* Building internal representation for IRA.
   Copyright (C) 2006-2015 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
21#include "config.h"
22#include "system.h"
23#include "coretypes.h"
c7131fb2 24#include "backend.h"
9fdcd34e 25#include "predict.h"
058e97ec 26#include "rtl.h"
c7131fb2 27#include "df.h"
058e97ec
VM
28#include "tm_p.h"
29#include "target.h"
30#include "regs.h"
31#include "flags.h"
058e97ec
VM
32#include "insn-config.h"
33#include "recog.h"
718f9c0f 34#include "diagnostic-core.h"
058e97ec 35#include "params.h"
058e97ec
VM
36#include "reload.h"
37#include "sparseset.h"
c7131fb2
AM
38#include "cfgloop.h"
39#include "ira.h"
40#include "alloc-pool.h"
058e97ec 41#include "ira-int.h"
5936d944 42#include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
058e97ec 43
070a1983 44static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx_insn *,
058e97ec
VM
45 ira_loop_tree_node_t);
46
47/* The root of the loop tree corresponding to the all function. */
48ira_loop_tree_node_t ira_loop_tree_root;
49
50/* Height of the loop tree. */
51int ira_loop_tree_height;
52
53/* All nodes representing basic blocks are referred through the
54 following array. We can not use basic block member `aux' for this
55 because it is used for insertion of insns on edges. */
56ira_loop_tree_node_t ira_bb_nodes;
57
58/* All nodes representing loops are referred through the following
59 array. */
60ira_loop_tree_node_t ira_loop_nodes;
61
caff7edf
JJ
62/* And size of the ira_loop_nodes array. */
63unsigned int ira_loop_nodes_count;
64
b8698a0f 65/* Map regno -> allocnos with given regno (see comments for
058e97ec
VM
66 allocno member `next_regno_allocno'). */
67ira_allocno_t *ira_regno_allocno_map;
68
69/* Array of references to all allocnos. The order number of the
70 allocno corresponds to the index in the array. Removed allocnos
71 have NULL element value. */
72ira_allocno_t *ira_allocnos;
73
74/* Sizes of the previous array. */
75int ira_allocnos_num;
76
a49ae217
BS
77/* Count of conflict record structures we've created, used when creating
78 a new conflict id. */
79int ira_objects_num;
80
81/* Map a conflict id to its conflict record. */
82ira_object_t *ira_object_id_map;
058e97ec 83
3b6d1699
VM
84/* Array of references to all allocno preferences. The order number
85 of the preference corresponds to the index in the array. */
86ira_pref_t *ira_prefs;
87
88/* Size of the previous array. */
89int ira_prefs_num;
90
058e97ec
VM
91/* Array of references to all copies. The order number of the copy
92 corresponds to the index in the array. Removed copies have NULL
93 element value. */
94ira_copy_t *ira_copies;
95
96/* Size of the previous array. */
97int ira_copies_num;
98
99\f
100
101/* LAST_BASIC_BLOCK before generating additional insns because of live
102 range splitting. Emitting insns on a critical edge creates a new
103 basic block. */
104static int last_basic_block_before_change;
105
2608d841
VM
106/* Initialize some members in loop tree node NODE. Use LOOP_NUM for
107 the member loop_num. */
058e97ec 108static void
2608d841
VM
109init_loop_tree_node (struct ira_loop_tree_node *node, int loop_num)
110{
111 int max_regno = max_reg_num ();
112
113 node->regno_allocno_map
114 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
115 memset (node->regno_allocno_map, 0, sizeof (ira_allocno_t) * max_regno);
116 memset (node->reg_pressure, 0, sizeof (node->reg_pressure));
117 node->all_allocnos = ira_allocate_bitmap ();
118 node->modified_regnos = ira_allocate_bitmap ();
119 node->border_allocnos = ira_allocate_bitmap ();
120 node->local_copies = ira_allocate_bitmap ();
121 node->loop_num = loop_num;
122 node->children = NULL;
123 node->subloops = NULL;
124}
125
126
127/* The following function allocates the loop tree nodes. If
128 CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
129 the root which corresponds the all function) will be not allocated
130 but nodes will still be allocated for basic blocks. */
131static void
132create_loop_tree_nodes (void)
058e97ec
VM
133{
134 unsigned int i, j;
058e97ec
VM
135 bool skip_p;
136 edge_iterator ei;
137 edge e;
9771b263 138 vec<edge> edges;
058e97ec
VM
139 loop_p loop;
140
141 ira_bb_nodes
142 = ((struct ira_loop_tree_node *)
8b1c6fd7
DM
143 ira_allocate (sizeof (struct ira_loop_tree_node)
144 * last_basic_block_for_fn (cfun)));
145 last_basic_block_before_change = last_basic_block_for_fn (cfun);
146 for (i = 0; i < (unsigned int) last_basic_block_for_fn (cfun); i++)
058e97ec
VM
147 {
148 ira_bb_nodes[i].regno_allocno_map = NULL;
149 memset (ira_bb_nodes[i].reg_pressure, 0,
150 sizeof (ira_bb_nodes[i].reg_pressure));
49d988e7 151 ira_bb_nodes[i].all_allocnos = NULL;
058e97ec
VM
152 ira_bb_nodes[i].modified_regnos = NULL;
153 ira_bb_nodes[i].border_allocnos = NULL;
154 ira_bb_nodes[i].local_copies = NULL;
155 }
2608d841
VM
156 if (current_loops == NULL)
157 {
caff7edf 158 ira_loop_nodes_count = 1;
2608d841
VM
159 ira_loop_nodes = ((struct ira_loop_tree_node *)
160 ira_allocate (sizeof (struct ira_loop_tree_node)));
161 init_loop_tree_node (ira_loop_nodes, 0);
162 return;
163 }
0fc822d0 164 ira_loop_nodes_count = number_of_loops (cfun);
058e97ec
VM
165 ira_loop_nodes = ((struct ira_loop_tree_node *)
166 ira_allocate (sizeof (struct ira_loop_tree_node)
caff7edf 167 * ira_loop_nodes_count));
0fc822d0 168 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
058e97ec 169 {
661bc682 170 if (loop_outer (loop) != NULL)
058e97ec
VM
171 {
172 ira_loop_nodes[i].regno_allocno_map = NULL;
058e97ec
VM
173 skip_p = false;
174 FOR_EACH_EDGE (e, ei, loop->header->preds)
175 if (e->src != loop->latch
176 && (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
177 {
178 skip_p = true;
179 break;
180 }
181 if (skip_p)
182 continue;
183 edges = get_loop_exit_edges (loop);
9771b263 184 FOR_EACH_VEC_ELT (edges, j, e)
058e97ec
VM
185 if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
186 {
187 skip_p = true;
188 break;
189 }
9771b263 190 edges.release ();
058e97ec
VM
191 if (skip_p)
192 continue;
193 }
2608d841 194 init_loop_tree_node (&ira_loop_nodes[i], loop->num);
058e97ec
VM
195 }
196}
197
198/* The function returns TRUE if there are more one allocation
199 region. */
200static bool
201more_one_region_p (void)
202{
203 unsigned int i;
204 loop_p loop;
205
2608d841 206 if (current_loops != NULL)
0fc822d0 207 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
2608d841
VM
208 if (ira_loop_nodes[i].regno_allocno_map != NULL
209 && ira_loop_tree_root != &ira_loop_nodes[i])
210 return true;
058e97ec
VM
211 return false;
212}
213
214/* Free the loop tree node of a loop. */
215static void
216finish_loop_tree_node (ira_loop_tree_node_t loop)
217{
218 if (loop->regno_allocno_map != NULL)
219 {
220 ira_assert (loop->bb == NULL);
221 ira_free_bitmap (loop->local_copies);
222 ira_free_bitmap (loop->border_allocnos);
223 ira_free_bitmap (loop->modified_regnos);
49d988e7 224 ira_free_bitmap (loop->all_allocnos);
058e97ec
VM
225 ira_free (loop->regno_allocno_map);
226 loop->regno_allocno_map = NULL;
227 }
228}
229
230/* Free the loop tree nodes. */
231static void
232finish_loop_tree_nodes (void)
233{
234 unsigned int i;
058e97ec 235
caff7edf
JJ
236 for (i = 0; i < ira_loop_nodes_count; i++)
237 finish_loop_tree_node (&ira_loop_nodes[i]);
058e97ec
VM
238 ira_free (ira_loop_nodes);
239 for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
240 {
241 if (ira_bb_nodes[i].local_copies != NULL)
242 ira_free_bitmap (ira_bb_nodes[i].local_copies);
243 if (ira_bb_nodes[i].border_allocnos != NULL)
244 ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
245 if (ira_bb_nodes[i].modified_regnos != NULL)
246 ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
49d988e7
VM
247 if (ira_bb_nodes[i].all_allocnos != NULL)
248 ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
058e97ec
VM
249 if (ira_bb_nodes[i].regno_allocno_map != NULL)
250 ira_free (ira_bb_nodes[i].regno_allocno_map);
251 }
252 ira_free (ira_bb_nodes);
253}
254
255\f
256
257/* The following recursive function adds LOOP to the loop tree
2608d841
VM
258 hierarchy. LOOP is added only once. If LOOP is NULL we adding
259 loop designating the whole function when CFG loops are not
260 built. */
058e97ec
VM
261static void
262add_loop_to_tree (struct loop *loop)
263{
2608d841 264 int loop_num;
058e97ec
VM
265 struct loop *parent;
266 ira_loop_tree_node_t loop_node, parent_node;
267
268 /* We can not use loop node access macros here because of potential
269 checking and because the nodes are not initialized enough
270 yet. */
2608d841 271 if (loop != NULL && loop_outer (loop) != NULL)
058e97ec 272 add_loop_to_tree (loop_outer (loop));
2608d841
VM
273 loop_num = loop != NULL ? loop->num : 0;
274 if (ira_loop_nodes[loop_num].regno_allocno_map != NULL
275 && ira_loop_nodes[loop_num].children == NULL)
058e97ec
VM
276 {
277 /* We have not added loop node to the tree yet. */
2608d841 278 loop_node = &ira_loop_nodes[loop_num];
058e97ec
VM
279 loop_node->loop = loop;
280 loop_node->bb = NULL;
2608d841
VM
281 if (loop == NULL)
282 parent = NULL;
283 else
284 {
285 for (parent = loop_outer (loop);
286 parent != NULL;
287 parent = loop_outer (parent))
288 if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
289 break;
290 }
058e97ec
VM
291 if (parent == NULL)
292 {
293 loop_node->next = NULL;
294 loop_node->subloop_next = NULL;
295 loop_node->parent = NULL;
296 }
297 else
298 {
299 parent_node = &ira_loop_nodes[parent->num];
300 loop_node->next = parent_node->children;
301 parent_node->children = loop_node;
302 loop_node->subloop_next = parent_node->subloops;
303 parent_node->subloops = loop_node;
304 loop_node->parent = parent_node;
305 }
306 }
307}
308
309/* The following recursive function sets up levels of nodes of the
310 tree given its root LOOP_NODE. The enumeration starts with LEVEL.
311 The function returns maximal value of level in the tree + 1. */
312static int
313setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
314{
315 int height, max_height;
316 ira_loop_tree_node_t subloop_node;
317
318 ira_assert (loop_node->bb == NULL);
319 loop_node->level = level;
320 max_height = level + 1;
321 for (subloop_node = loop_node->subloops;
322 subloop_node != NULL;
323 subloop_node = subloop_node->subloop_next)
324 {
325 ira_assert (subloop_node->bb == NULL);
326 height = setup_loop_tree_level (subloop_node, level + 1);
327 if (height > max_height)
328 max_height = height;
329 }
330 return max_height;
331}
332
333/* Create the loop tree. The algorithm is designed to provide correct
334 order of loops (they are ordered by their last loop BB) and basic
335 blocks in the chain formed by member next. */
336static void
337form_loop_tree (void)
338{
058e97ec
VM
339 basic_block bb;
340 struct loop *parent;
341 ira_loop_tree_node_t bb_node, loop_node;
058e97ec
VM
342
343 /* We can not use loop/bb node access macros because of potential
344 checking and because the nodes are not initialized enough
345 yet. */
11cd3bed 346 FOR_EACH_BB_FN (bb, cfun)
058e97ec
VM
347 {
348 bb_node = &ira_bb_nodes[bb->index];
349 bb_node->bb = bb;
350 bb_node->loop = NULL;
351 bb_node->subloops = NULL;
352 bb_node->children = NULL;
353 bb_node->subloop_next = NULL;
354 bb_node->next = NULL;
2608d841
VM
355 if (current_loops == NULL)
356 parent = NULL;
357 else
358 {
359 for (parent = bb->loop_father;
360 parent != NULL;
361 parent = loop_outer (parent))
362 if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
363 break;
364 }
058e97ec 365 add_loop_to_tree (parent);
2608d841 366 loop_node = &ira_loop_nodes[parent == NULL ? 0 : parent->num];
058e97ec
VM
367 bb_node->next = loop_node->children;
368 bb_node->parent = loop_node;
369 loop_node->children = bb_node;
370 }
2608d841 371 ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (0);
058e97ec
VM
372 ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
373 ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
374}
375
376\f
377
378/* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
379 tree nodes. */
380static void
381rebuild_regno_allocno_maps (void)
382{
383 unsigned int l;
384 int max_regno, regno;
385 ira_allocno_t a;
386 ira_loop_tree_node_t loop_tree_node;
387 loop_p loop;
388 ira_allocno_iterator ai;
389
2608d841 390 ira_assert (current_loops != NULL);
058e97ec 391 max_regno = max_reg_num ();
0fc822d0 392 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), l, loop)
058e97ec
VM
393 if (ira_loop_nodes[l].regno_allocno_map != NULL)
394 {
395 ira_free (ira_loop_nodes[l].regno_allocno_map);
396 ira_loop_nodes[l].regno_allocno_map
397 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
398 * max_regno);
399 memset (ira_loop_nodes[l].regno_allocno_map, 0,
400 sizeof (ira_allocno_t) * max_regno);
401 }
402 ira_free (ira_regno_allocno_map);
403 ira_regno_allocno_map
404 = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
405 memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
406 FOR_EACH_ALLOCNO (a, ai)
407 {
408 if (ALLOCNO_CAP_MEMBER (a) != NULL)
409 /* Caps are not in the regno allocno maps. */
410 continue;
411 regno = ALLOCNO_REGNO (a);
412 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
413 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
414 ira_regno_allocno_map[regno] = a;
415 if (loop_tree_node->regno_allocno_map[regno] == NULL)
416 /* Remember that we can create temporary allocnos to break
417 cycles in register shuffle. */
418 loop_tree_node->regno_allocno_map[regno] = a;
419 }
420}
058e97ec
VM
421\f
422
a49ae217 423/* Pools for allocnos, allocno live ranges and objects. */
0b470bae
ML
424static pool_allocator<live_range> live_range_pool ("live ranges", 100);
425static pool_allocator<ira_allocno> allocno_pool ("allocnos", 100);
426static pool_allocator<ira_object> object_pool ("objects", 100);
058e97ec
VM
427
428/* Vec containing references to all created allocnos. It is a
429 container of array allocnos. */
9771b263 430static vec<ira_allocno_t> allocno_vec;
058e97ec 431
a49ae217
BS
432/* Vec containing references to all created ira_objects. It is a
433 container of ira_object_id_map. */
9771b263 434static vec<ira_object_t> ira_object_id_map_vec;
058e97ec
VM
435
436/* Initialize data concerning allocnos. */
437static void
438initiate_allocnos (void)
439{
9771b263 440 allocno_vec.create (max_reg_num () * 2);
058e97ec
VM
441 ira_allocnos = NULL;
442 ira_allocnos_num = 0;
a49ae217 443 ira_objects_num = 0;
9771b263 444 ira_object_id_map_vec.create (max_reg_num () * 2);
a49ae217 445 ira_object_id_map = NULL;
058e97ec 446 ira_regno_allocno_map
1756cb66
VM
447 = (ira_allocno_t *) ira_allocate (max_reg_num ()
448 * sizeof (ira_allocno_t));
058e97ec
VM
449 memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
450}
451
a49ae217
BS
452/* Create and return an object corresponding to a new allocno A. */
453static ira_object_t
ac0ab4f7 454ira_create_object (ira_allocno_t a, int subword)
a49ae217 455{
1756cb66 456 enum reg_class aclass = ALLOCNO_CLASS (a);
0b470bae 457 ira_object_t obj = object_pool.allocate ();
a49ae217
BS
458
459 OBJECT_ALLOCNO (obj) = a;
ac0ab4f7 460 OBJECT_SUBWORD (obj) = subword;
a49ae217
BS
461 OBJECT_CONFLICT_ID (obj) = ira_objects_num;
462 OBJECT_CONFLICT_VEC_P (obj) = false;
463 OBJECT_CONFLICT_ARRAY (obj) = NULL;
464 OBJECT_NUM_CONFLICTS (obj) = 0;
465 COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
466 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
467 IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
1756cb66 468 reg_class_contents[aclass]);
a49ae217 469 IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
1756cb66 470 reg_class_contents[aclass]);
a49ae217
BS
471 OBJECT_MIN (obj) = INT_MAX;
472 OBJECT_MAX (obj) = -1;
9140d27b 473 OBJECT_LIVE_RANGES (obj) = NULL;
a49ae217 474
9771b263 475 ira_object_id_map_vec.safe_push (obj);
a49ae217 476 ira_object_id_map
9771b263
DN
477 = ira_object_id_map_vec.address ();
478 ira_objects_num = ira_object_id_map_vec.length ();
ac0ab4f7 479
a49ae217
BS
480 return obj;
481}
482
058e97ec
VM
483/* Create and return the allocno corresponding to REGNO in
484 LOOP_TREE_NODE. Add the allocno to the list of allocnos with the
485 same regno if CAP_P is FALSE. */
486ira_allocno_t
1756cb66
VM
487ira_create_allocno (int regno, bool cap_p,
488 ira_loop_tree_node_t loop_tree_node)
058e97ec
VM
489{
490 ira_allocno_t a;
491
0b470bae 492 a = allocno_pool.allocate ();
058e97ec
VM
493 ALLOCNO_REGNO (a) = regno;
494 ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
495 if (! cap_p)
496 {
497 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
498 ira_regno_allocno_map[regno] = a;
499 if (loop_tree_node->regno_allocno_map[regno] == NULL)
500 /* Remember that we can create temporary allocnos to break
501 cycles in register shuffle on region borders (see
502 ira-emit.c). */
503 loop_tree_node->regno_allocno_map[regno] = a;
504 }
505 ALLOCNO_CAP (a) = NULL;
506 ALLOCNO_CAP_MEMBER (a) = NULL;
507 ALLOCNO_NUM (a) = ira_allocnos_num;
49d988e7 508 bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
058e97ec 509 ALLOCNO_NREFS (a) = 0;
854bd721 510 ALLOCNO_FREQ (a) = 0;
058e97ec
VM
511 ALLOCNO_HARD_REGNO (a) = -1;
512 ALLOCNO_CALL_FREQ (a) = 0;
513 ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
e384e6b5 514 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a) = 0;
c2ba7e7a 515 CLEAR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
058e97ec
VM
516#ifdef STACK_REGS
517 ALLOCNO_NO_STACK_REG_P (a) = false;
518 ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
519#endif
058e97ec 520 ALLOCNO_DONT_REASSIGN_P (a) = false;
927425df 521 ALLOCNO_BAD_SPILL_P (a) = false;
058e97ec 522 ALLOCNO_ASSIGNED_P (a) = false;
058e97ec 523 ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
d1bb282e 524 ALLOCNO_WMODE (a) = ALLOCNO_MODE (a);
3b6d1699 525 ALLOCNO_PREFS (a) = NULL;
058e97ec
VM
526 ALLOCNO_COPIES (a) = NULL;
527 ALLOCNO_HARD_REG_COSTS (a) = NULL;
528 ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
529 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
530 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
1756cb66
VM
531 ALLOCNO_CLASS (a) = NO_REGS;
532 ALLOCNO_UPDATED_CLASS_COST (a) = 0;
533 ALLOCNO_CLASS_COST (a) = 0;
058e97ec
VM
534 ALLOCNO_MEMORY_COST (a) = 0;
535 ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
536 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
ac0ab4f7 537 ALLOCNO_NUM_OBJECTS (a) = 0;
a49ae217 538
1756cb66 539 ALLOCNO_ADD_DATA (a) = NULL;
9771b263
DN
540 allocno_vec.safe_push (a);
541 ira_allocnos = allocno_vec.address ();
542 ira_allocnos_num = allocno_vec.length ();
ac0ab4f7 543
058e97ec
VM
544 return a;
545}
546
1756cb66
VM
547/* Set up register class for A and update its conflict hard
548 registers. */
058e97ec 549void
1756cb66 550ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
058e97ec 551{
1756cb66
VM
552 ira_allocno_object_iterator oi;
553 ira_object_t obj;
554
555 ALLOCNO_CLASS (a) = aclass;
556 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
557 {
558 IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
559 reg_class_contents[aclass]);
560 IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
561 reg_class_contents[aclass]);
562 }
a49ae217
BS
563}
564
ac0ab4f7
BS
565/* Determine the number of objects we should associate with allocno A
566 and allocate them. */
a49ae217 567void
ac0ab4f7 568ira_create_allocno_objects (ira_allocno_t a)
a49ae217 569{
ef4bddc2 570 machine_mode mode = ALLOCNO_MODE (a);
1756cb66
VM
571 enum reg_class aclass = ALLOCNO_CLASS (a);
572 int n = ira_reg_class_max_nregs[aclass][mode];
ac0ab4f7
BS
573 int i;
574
575 if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2)
576 n = 1;
577
578 ALLOCNO_NUM_OBJECTS (a) = n;
579 for (i = 0; i < n; i++)
580 ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
a49ae217
BS
581}
582
ac0ab4f7 583/* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
1756cb66 584 ALLOCNO_OBJECT structures. This must be called after the allocno
ac0ab4f7 585 classes are known. */
a49ae217
BS
586static void
587create_allocno_objects (void)
588{
589 ira_allocno_t a;
590 ira_allocno_iterator ai;
591
592 FOR_EACH_ALLOCNO (a, ai)
ac0ab4f7 593 ira_create_allocno_objects (a);
058e97ec
VM
594}
595
ac0ab4f7
BS
596/* Merge hard register conflict information for all objects associated with
597 allocno TO into the corresponding objects associated with FROM.
598 If TOTAL_ONLY is true, we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS. */
3c55880a
BS
599static void
600merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
601 bool total_only)
602{
ac0ab4f7
BS
603 int i;
604 gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
605 for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
606 {
607 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
608 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
1756cb66 609
ac0ab4f7
BS
610 if (!total_only)
611 IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
612 OBJECT_CONFLICT_HARD_REGS (from_obj));
613 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
614 OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
615 }
3c55880a
BS
616#ifdef STACK_REGS
617 if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
618 ALLOCNO_NO_STACK_REG_P (to) = true;
619 if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
620 ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
621#endif
622}
623
ac0ab4f7
BS
624/* Update hard register conflict information for all objects associated with
625 A to include the regs in SET. */
626void
627ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
628{
629 ira_allocno_object_iterator i;
630 ira_object_t obj;
1756cb66 631
ac0ab4f7
BS
632 FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
633 {
634 IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
635 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
636 }
637}
638
a49ae217
BS
639/* Return TRUE if a conflict vector with NUM elements is more
640 profitable than a conflict bit vector for OBJ. */
058e97ec 641bool
a49ae217 642ira_conflict_vector_profitable_p (ira_object_t obj, int num)
058e97ec
VM
643{
644 int nw;
a49ae217
BS
645 int max = OBJECT_MAX (obj);
646 int min = OBJECT_MIN (obj);
058e97ec 647
a49ae217
BS
648 if (max < min)
649 /* We prefer a bit vector in such case because it does not result
650 in allocation. */
058e97ec
VM
651 return false;
652
a49ae217
BS
653 nw = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
654 return (2 * sizeof (ira_object_t) * (num + 1)
058e97ec
VM
655 < 3 * nw * sizeof (IRA_INT_TYPE));
656}
657
a49ae217
BS
658/* Allocates and initialize the conflict vector of OBJ for NUM
659 conflicting objects. */
058e97ec 660void
a49ae217 661ira_allocate_conflict_vec (ira_object_t obj, int num)
058e97ec
VM
662{
663 int size;
a49ae217 664 ira_object_t *vec;
058e97ec 665
a49ae217 666 ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
058e97ec 667 num++; /* for NULL end marker */
a49ae217
BS
668 size = sizeof (ira_object_t) * num;
669 OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
670 vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
058e97ec 671 vec[0] = NULL;
a49ae217
BS
672 OBJECT_NUM_CONFLICTS (obj) = 0;
673 OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
674 OBJECT_CONFLICT_VEC_P (obj) = true;
058e97ec
VM
675}
676
a49ae217 677/* Allocate and initialize the conflict bit vector of OBJ. */
058e97ec 678static void
a49ae217 679allocate_conflict_bit_vec (ira_object_t obj)
058e97ec
VM
680{
681 unsigned int size;
682
a49ae217
BS
683 ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
684 size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
058e97ec 685 / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
a49ae217
BS
686 OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
687 memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
688 OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
689 OBJECT_CONFLICT_VEC_P (obj) = false;
058e97ec
VM
690}
691
692/* Allocate and initialize the conflict vector or conflict bit vector
ac0ab4f7 693 of OBJ for NUM conflicting allocnos whatever is more profitable. */
058e97ec 694void
ac0ab4f7 695ira_allocate_object_conflicts (ira_object_t obj, int num)
058e97ec 696{
ac0ab4f7
BS
697 if (ira_conflict_vector_profitable_p (obj, num))
698 ira_allocate_conflict_vec (obj, num);
058e97ec 699 else
ac0ab4f7 700 allocate_conflict_bit_vec (obj);
058e97ec
VM
701}
702
a49ae217 703/* Add OBJ2 to the conflicts of OBJ1. */
058e97ec 704static void
a49ae217 705add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
058e97ec
VM
706{
707 int num;
708 unsigned int size;
709
a49ae217 710 if (OBJECT_CONFLICT_VEC_P (obj1))
058e97ec 711 {
a49ae217
BS
712 ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
713 int curr_num = OBJECT_NUM_CONFLICTS (obj1);
714 num = curr_num + 2;
715 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
058e97ec 716 {
a49ae217 717 ira_object_t *newvec;
058e97ec 718 size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
a49ae217
BS
719 newvec = (ira_object_t *) ira_allocate (size);
720 memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
721 ira_free (vec);
722 vec = newvec;
723 OBJECT_CONFLICT_ARRAY (obj1) = vec;
724 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
058e97ec 725 }
a49ae217 726 vec[num - 2] = obj2;
058e97ec 727 vec[num - 1] = NULL;
a49ae217 728 OBJECT_NUM_CONFLICTS (obj1)++;
058e97ec
VM
729 }
730 else
731 {
732 int nw, added_head_nw, id;
a49ae217 733 IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);
058e97ec 734
a49ae217
BS
735 id = OBJECT_CONFLICT_ID (obj2);
736 if (OBJECT_MIN (obj1) > id)
058e97ec
VM
737 {
738 /* Expand head of the bit vector. */
a49ae217
BS
739 added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
740 nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
058e97ec 741 size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
a49ae217 742 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
058e97ec
VM
743 {
744 memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
745 vec, nw * sizeof (IRA_INT_TYPE));
746 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
747 }
748 else
749 {
750 size
751 = (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
752 vec = (IRA_INT_TYPE *) ira_allocate (size);
753 memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
a49ae217 754 OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
058e97ec
VM
755 memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
756 memset ((char *) vec
757 + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
758 0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
a49ae217
BS
759 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
760 OBJECT_CONFLICT_ARRAY (obj1) = vec;
761 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
058e97ec 762 }
a49ae217 763 OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
058e97ec 764 }
a49ae217 765 else if (OBJECT_MAX (obj1) < id)
058e97ec 766 {
a49ae217 767 nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
058e97ec 768 size = nw * sizeof (IRA_INT_TYPE);
a49ae217 769 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
058e97ec
VM
770 {
771 /* Expand tail of the bit vector. */
772 size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
773 vec = (IRA_INT_TYPE *) ira_allocate (size);
a49ae217
BS
774 memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1), OBJECT_CONFLICT_ARRAY_SIZE (obj1));
775 memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
776 0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
777 ira_free (OBJECT_CONFLICT_ARRAY (obj1));
778 OBJECT_CONFLICT_ARRAY (obj1) = vec;
779 OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
058e97ec 780 }
a49ae217 781 OBJECT_MAX (obj1) = id;
058e97ec 782 }
a49ae217 783 SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
058e97ec
VM
784 }
785}
786
a49ae217
BS
787/* Add OBJ1 to the conflicts of OBJ2 and vice versa. */
788static void
789ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
058e97ec 790{
a49ae217
BS
791 add_to_conflicts (obj1, obj2);
792 add_to_conflicts (obj2, obj1);
058e97ec
VM
793}
794
a49ae217 795/* Clear all conflicts of OBJ. */
058e97ec 796static void
a49ae217 797clear_conflicts (ira_object_t obj)
058e97ec 798{
a49ae217 799 if (OBJECT_CONFLICT_VEC_P (obj))
058e97ec 800 {
a49ae217
BS
801 OBJECT_NUM_CONFLICTS (obj) = 0;
802 OBJECT_CONFLICT_VEC (obj)[0] = NULL;
058e97ec 803 }
a49ae217 804 else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
058e97ec
VM
805 {
806 int nw;
807
a49ae217
BS
808 nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
809 memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
058e97ec
VM
810 }
811}
812
813/* The array used to find duplications in conflict vectors of
814 allocnos. */
a49ae217 815static int *conflict_check;
058e97ec
VM
816
817/* The value used to mark allocation presence in conflict vector of
818 the current allocno. */
a49ae217 819static int curr_conflict_check_tick;
058e97ec 820
a49ae217 821/* Remove duplications in conflict vector of OBJ. */
058e97ec 822static void
a49ae217 823compress_conflict_vec (ira_object_t obj)
058e97ec 824{
a49ae217 825 ira_object_t *vec, conflict_obj;
058e97ec
VM
826 int i, j;
827
a49ae217
BS
828 ira_assert (OBJECT_CONFLICT_VEC_P (obj));
829 vec = OBJECT_CONFLICT_VEC (obj);
830 curr_conflict_check_tick++;
831 for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
058e97ec 832 {
a49ae217
BS
833 int id = OBJECT_CONFLICT_ID (conflict_obj);
834 if (conflict_check[id] != curr_conflict_check_tick)
058e97ec 835 {
a49ae217
BS
836 conflict_check[id] = curr_conflict_check_tick;
837 vec[j++] = conflict_obj;
058e97ec
VM
838 }
839 }
a49ae217 840 OBJECT_NUM_CONFLICTS (obj) = j;
058e97ec
VM
841 vec[j] = NULL;
842}
843
844/* Remove duplications in conflict vectors of all allocnos. */
845static void
846compress_conflict_vecs (void)
847{
ac0ab4f7
BS
848 ira_object_t obj;
849 ira_object_iterator oi;
058e97ec 850
a49ae217
BS
851 conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
852 memset (conflict_check, 0, sizeof (int) * ira_objects_num);
853 curr_conflict_check_tick = 0;
ac0ab4f7 854 FOR_EACH_OBJECT (obj, oi)
a49ae217 855 {
a49ae217
BS
856 if (OBJECT_CONFLICT_VEC_P (obj))
857 compress_conflict_vec (obj);
858 }
859 ira_free (conflict_check);
058e97ec
VM
860}
861
862/* This recursive function outputs allocno A and if it is a cap the
863 function outputs its members. */
864void
865ira_print_expanded_allocno (ira_allocno_t a)
866{
867 basic_block bb;
868
869 fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
870 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
871 fprintf (ira_dump_file, ",b%d", bb->index);
872 else
2608d841 873 fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
058e97ec
VM
874 if (ALLOCNO_CAP_MEMBER (a) != NULL)
875 {
876 fprintf (ira_dump_file, ":");
877 ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
878 }
879 fprintf (ira_dump_file, ")");
880}
881
882/* Create and return the cap representing allocno A in the
883 parent loop. */
884static ira_allocno_t
885create_cap_allocno (ira_allocno_t a)
886{
887 ira_allocno_t cap;
888 ira_loop_tree_node_t parent;
1756cb66 889 enum reg_class aclass;
058e97ec 890
058e97ec
VM
891 parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
892 cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
893 ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
d1bb282e 894 ALLOCNO_WMODE (cap) = ALLOCNO_WMODE (a);
1756cb66
VM
895 aclass = ALLOCNO_CLASS (a);
896 ira_set_allocno_class (cap, aclass);
ac0ab4f7 897 ira_create_allocno_objects (cap);
058e97ec 898 ALLOCNO_CAP_MEMBER (cap) = a;
058e97ec 899 ALLOCNO_CAP (a) = cap;
1756cb66 900 ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
058e97ec 901 ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
058e97ec 902 ira_allocate_and_copy_costs
1756cb66 903 (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
058e97ec 904 ira_allocate_and_copy_costs
1756cb66 905 (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
058e97ec 906 ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
927425df 907 ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
058e97ec
VM
908 ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
909 ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
910 ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);
ac0ab4f7 911
3c55880a 912 merge_hard_reg_conflicts (a, cap, false);
ac0ab4f7 913
058e97ec 914 ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
e384e6b5 915 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
c2ba7e7a
RO
916 IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (cap),
917 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
058e97ec
VM
918 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
919 {
920 fprintf (ira_dump_file, " Creating cap ");
921 ira_print_expanded_allocno (cap);
922 fprintf (ira_dump_file, "\n");
923 }
924 return cap;
925}
926
ac0ab4f7 927/* Create and return a live range for OBJECT with given attributes. */
b14151b5 928live_range_t
9140d27b
BS
929ira_create_live_range (ira_object_t obj, int start, int finish,
930 live_range_t next)
058e97ec 931{
b14151b5 932 live_range_t p;
058e97ec 933
0b470bae 934 p = live_range_pool.allocate ();
9140d27b 935 p->object = obj;
058e97ec
VM
936 p->start = start;
937 p->finish = finish;
938 p->next = next;
939 return p;
940}
941
ac0ab4f7
BS
942/* Create a new live range for OBJECT and queue it at the head of its
943 live range list. */
944void
945ira_add_live_range_to_object (ira_object_t object, int start, int finish)
946{
947 live_range_t p;
948 p = ira_create_live_range (object, start, finish,
949 OBJECT_LIVE_RANGES (object));
950 OBJECT_LIVE_RANGES (object) = p;
951}
952
058e97ec 953/* Copy allocno live range R and return the result. */
b14151b5 954static live_range_t
9140d27b 955copy_live_range (live_range_t r)
058e97ec 956{
b14151b5 957 live_range_t p;
058e97ec 958
0b470bae 959 p = live_range_pool.allocate ();
058e97ec
VM
960 *p = *r;
961 return p;
962}
963
964/* Copy allocno live range list given by its head R and return the
965 result. */
b14151b5 966live_range_t
9140d27b 967ira_copy_live_range_list (live_range_t r)
058e97ec 968{
b14151b5 969 live_range_t p, first, last;
058e97ec
VM
970
971 if (r == NULL)
972 return NULL;
973 for (first = last = NULL; r != NULL; r = r->next)
974 {
9140d27b 975 p = copy_live_range (r);
058e97ec
VM
976 if (first == NULL)
977 first = p;
978 else
979 last->next = p;
980 last = p;
981 }
982 return first;
983}
984
3553f0bb
VM
985/* Merge ranges R1 and R2 and returns the result. The function
986 maintains the order of ranges and tries to minimize number of the
987 result ranges. */
b14151b5 988live_range_t
9140d27b 989ira_merge_live_ranges (live_range_t r1, live_range_t r2)
3553f0bb 990{
fab27f52 991 live_range_t first, last;
3553f0bb
VM
992
993 if (r1 == NULL)
994 return r2;
995 if (r2 == NULL)
996 return r1;
997 for (first = last = NULL; r1 != NULL && r2 != NULL;)
998 {
999 if (r1->start < r2->start)
fab27f52 1000 std::swap (r1, r2);
3553f0bb
VM
1001 if (r1->start <= r2->finish + 1)
1002 {
1003 /* Intersected ranges: merge r1 and r2 into r1. */
1004 r1->start = r2->start;
1005 if (r1->finish < r2->finish)
1006 r1->finish = r2->finish;
fab27f52 1007 live_range_t temp = r2;
3553f0bb 1008 r2 = r2->next;
9140d27b 1009 ira_finish_live_range (temp);
3553f0bb
VM
1010 if (r2 == NULL)
1011 {
1012 /* To try to merge with subsequent ranges in r1. */
1013 r2 = r1->next;
1014 r1->next = NULL;
1015 }
1016 }
1017 else
1018 {
1019 /* Add r1 to the result. */
1020 if (first == NULL)
1021 first = last = r1;
1022 else
1023 {
1024 last->next = r1;
1025 last = r1;
1026 }
1027 r1 = r1->next;
1028 if (r1 == NULL)
1029 {
1030 /* To try to merge with subsequent ranges in r2. */
1031 r1 = r2->next;
1032 r2->next = NULL;
1033 }
1034 }
1035 }
1036 if (r1 != NULL)
1037 {
1038 if (first == NULL)
1039 first = r1;
1040 else
1041 last->next = r1;
1042 ira_assert (r1->next == NULL);
1043 }
1044 else if (r2 != NULL)
1045 {
1046 if (first == NULL)
1047 first = r2;
1048 else
1049 last->next = r2;
1050 ira_assert (r2->next == NULL);
1051 }
1052 else
1053 {
1054 ira_assert (last->next == NULL);
1055 }
1056 return first;
1057}
1058
1059/* Return TRUE if live ranges R1 and R2 intersect. */
1060bool
9140d27b 1061ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
3553f0bb
VM
1062{
1063 /* Remember the live ranges are always kept ordered. */
1064 while (r1 != NULL && r2 != NULL)
1065 {
1066 if (r1->start > r2->finish)
1067 r1 = r1->next;
1068 else if (r2->start > r1->finish)
1069 r2 = r2->next;
1070 else
1071 return true;
1072 }
1073 return false;
1074}
1075
058e97ec
VM
1076/* Free allocno live range R. */
1077void
9140d27b 1078ira_finish_live_range (live_range_t r)
058e97ec 1079{
0b470bae 1080 live_range_pool.remove (r);
058e97ec
VM
1081}
1082
3553f0bb
VM
1083/* Free list of allocno live ranges starting with R. */
1084void
9140d27b 1085ira_finish_live_range_list (live_range_t r)
3553f0bb 1086{
b14151b5 1087 live_range_t next_r;
3553f0bb
VM
1088
1089 for (; r != NULL; r = next_r)
1090 {
1091 next_r = r->next;
9140d27b 1092 ira_finish_live_range (r);
3553f0bb
VM
1093 }
1094}
1095
058e97ec
VM
1096/* Free updated register costs of allocno A. */
1097void
1098ira_free_allocno_updated_costs (ira_allocno_t a)
1099{
1756cb66 1100 enum reg_class aclass;
058e97ec 1101
1756cb66 1102 aclass = ALLOCNO_CLASS (a);
058e97ec 1103 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
1756cb66 1104 ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
058e97ec
VM
1105 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
1106 if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
1107 ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1756cb66 1108 aclass);
058e97ec
VM
1109 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
1110}
1111
1756cb66
VM
1112/* Free and nullify all cost vectors allocated earlier for allocno
1113 A. */
058e97ec 1114static void
1756cb66 1115ira_free_allocno_costs (ira_allocno_t a)
058e97ec 1116{
1756cb66 1117 enum reg_class aclass = ALLOCNO_CLASS (a);
ac0ab4f7
BS
1118 ira_object_t obj;
1119 ira_allocno_object_iterator oi;
058e97ec 1120
ac0ab4f7
BS
1121 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
1122 {
1123 ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
1124 ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
1125 if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
1126 ira_free (OBJECT_CONFLICT_ARRAY (obj));
0b470bae 1127 object_pool.remove (obj);
ac0ab4f7 1128 }
9140d27b 1129
058e97ec 1130 ira_allocnos[ALLOCNO_NUM (a)] = NULL;
058e97ec 1131 if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
1756cb66 1132 ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
058e97ec 1133 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
1756cb66 1134 ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
058e97ec 1135 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
1756cb66 1136 ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
058e97ec
VM
1137 if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
1138 ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1756cb66
VM
1139 aclass);
1140 ALLOCNO_HARD_REG_COSTS (a) = NULL;
1141 ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
1142 ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
1143 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
1144}
1145
1146/* Free the memory allocated for allocno A. */
1147static void
1148finish_allocno (ira_allocno_t a)
1149{
1150 ira_free_allocno_costs (a);
0b470bae 1151 allocno_pool.remove (a);
058e97ec
VM
1152}
1153
1154/* Free the memory allocated for all allocnos. */
1155static void
1156finish_allocnos (void)
1157{
1158 ira_allocno_t a;
1159 ira_allocno_iterator ai;
1160
1161 FOR_EACH_ALLOCNO (a, ai)
1162 finish_allocno (a);
1163 ira_free (ira_regno_allocno_map);
9771b263
DN
1164 ira_object_id_map_vec.release ();
1165 allocno_vec.release ();
0b470bae
ML
1166 allocno_pool.release ();
1167 object_pool.release ();
1168 live_range_pool.release ();
058e97ec
VM
1169}
1170
1171\f
1172
3b6d1699 1173/* Pools for allocno preferences. */
0b470bae 1174static pool_allocator <ira_allocno_pref> pref_pool ("prefs", 100);
3b6d1699
VM
1175
1176/* Vec containing references to all created preferences. It is a
1177 container of array ira_prefs. */
1178static vec<ira_pref_t> pref_vec;
1179
1180/* The function initializes data concerning allocno prefs. */
1181static void
1182initiate_prefs (void)
1183{
3b6d1699
VM
1184 pref_vec.create (get_max_uid ());
1185 ira_prefs = NULL;
1186 ira_prefs_num = 0;
1187}
1188
1189/* Return pref for A and HARD_REGNO if any. */
1190static ira_pref_t
1191find_allocno_pref (ira_allocno_t a, int hard_regno)
1192{
1193 ira_pref_t pref;
1194
1195 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
1196 if (pref->allocno == a && pref->hard_regno == hard_regno)
1197 return pref;
1198 return NULL;
1199}
1200
1201/* Create and return pref with given attributes A, HARD_REGNO, and FREQ. */
1202ira_pref_t
1203ira_create_pref (ira_allocno_t a, int hard_regno, int freq)
1204{
1205 ira_pref_t pref;
1206
0b470bae 1207 pref = pref_pool.allocate ();
3b6d1699
VM
1208 pref->num = ira_prefs_num;
1209 pref->allocno = a;
1210 pref->hard_regno = hard_regno;
1211 pref->freq = freq;
1212 pref_vec.safe_push (pref);
1213 ira_prefs = pref_vec.address ();
1214 ira_prefs_num = pref_vec.length ();
1215 return pref;
1216}
1217
df3e3493 1218/* Attach a pref PREF to the corresponding allocno. */
3b6d1699
VM
1219static void
1220add_allocno_pref_to_list (ira_pref_t pref)
1221{
1222 ira_allocno_t a = pref->allocno;
1223
1224 pref->next_pref = ALLOCNO_PREFS (a);
1225 ALLOCNO_PREFS (a) = pref;
1226}
1227
1228/* Create (or update frequency if the pref already exists) the pref of
1229 allocnos A preferring HARD_REGNO with frequency FREQ. */
1230void
1231ira_add_allocno_pref (ira_allocno_t a, int hard_regno, int freq)
1232{
1233 ira_pref_t pref;
1234
1235 if (freq <= 0)
1236 return;
1237 if ((pref = find_allocno_pref (a, hard_regno)) != NULL)
1238 {
1239 pref->freq += freq;
1240 return;
1241 }
1242 pref = ira_create_pref (a, hard_regno, freq);
1243 ira_assert (a != NULL);
1244 add_allocno_pref_to_list (pref);
1245}
1246
1247/* Print info about PREF into file F. */
1248static void
1249print_pref (FILE *f, ira_pref_t pref)
1250{
1251 fprintf (f, " pref%d:a%d(r%d)<-hr%d@%d\n", pref->num,
1252 ALLOCNO_NUM (pref->allocno), ALLOCNO_REGNO (pref->allocno),
1253 pref->hard_regno, pref->freq);
1254}
1255
1256/* Print info about PREF into stderr. */
1257void
1258ira_debug_pref (ira_pref_t pref)
1259{
1260 print_pref (stderr, pref);
1261}
1262
1263/* Print info about all prefs into file F. */
1264static void
1265print_prefs (FILE *f)
1266{
1267 ira_pref_t pref;
1268 ira_pref_iterator pi;
1269
1270 FOR_EACH_PREF (pref, pi)
1271 print_pref (f, pref);
1272}
1273
1274/* Print info about all prefs into stderr. */
1275void
1276ira_debug_prefs (void)
1277{
1278 print_prefs (stderr);
1279}
1280
1281/* Print info about prefs involving allocno A into file F. */
1282static void
1283print_allocno_prefs (FILE *f, ira_allocno_t a)
1284{
1285 ira_pref_t pref;
1286
1287 fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
1288 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
1289 fprintf (f, " pref%d:hr%d@%d", pref->num, pref->hard_regno, pref->freq);
1290 fprintf (f, "\n");
1291}
1292
1293/* Print info about prefs involving allocno A into stderr. */
1294void
1295ira_debug_allocno_prefs (ira_allocno_t a)
1296{
1297 print_allocno_prefs (stderr, a);
1298}
1299
1300/* The function frees memory allocated for PREF. */
1301static void
1302finish_pref (ira_pref_t pref)
1303{
1304 ira_prefs[pref->num] = NULL;
0b470bae 1305 pref_pool.remove (pref);
3b6d1699
VM
1306}
1307
1308/* Remove PREF from the list of allocno prefs and free memory for
1309 it. */
1310void
1311ira_remove_pref (ira_pref_t pref)
1312{
1313 ira_pref_t cpref, prev;
1314
1315 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
1316 fprintf (ira_dump_file, " Removing pref%d:hr%d@%d\n",
1317 pref->num, pref->hard_regno, pref->freq);
1318 for (prev = NULL, cpref = ALLOCNO_PREFS (pref->allocno);
1319 cpref != NULL;
1320 prev = cpref, cpref = cpref->next_pref)
1321 if (cpref == pref)
1322 break;
1323 ira_assert (cpref != NULL);
1324 if (prev == NULL)
1325 ALLOCNO_PREFS (pref->allocno) = pref->next_pref;
1326 else
1327 prev->next_pref = pref->next_pref;
1328 finish_pref (pref);
1329}
1330
1331/* Remove all prefs of allocno A. */
1332void
1333ira_remove_allocno_prefs (ira_allocno_t a)
1334{
1335 ira_pref_t pref, next_pref;
1336
1337 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
1338 {
1339 next_pref = pref->next_pref;
1340 finish_pref (pref);
1341 }
1342 ALLOCNO_PREFS (a) = NULL;
1343}
1344
1345/* Free memory allocated for all prefs. */
1346static void
1347finish_prefs (void)
1348{
1349 ira_pref_t pref;
1350 ira_pref_iterator pi;
1351
1352 FOR_EACH_PREF (pref, pi)
1353 finish_pref (pref);
1354 pref_vec.release ();
0b470bae 1355 pref_pool.release ();
3b6d1699
VM
1356}
1357
1358\f
1359
058e97ec 1360/* Pools for copies. */
0b470bae 1361static pool_allocator<ira_allocno_copy> copy_pool ("copies", 100);
058e97ec
VM
1362
1363/* Vec containing references to all created copies. It is a
1364 container of array ira_copies. */
9771b263 1365static vec<ira_copy_t> copy_vec;
058e97ec
VM
1366
1367/* The function initializes data concerning allocno copies. */
1368static void
1369initiate_copies (void)
1370{
9771b263 1371 copy_vec.create (get_max_uid ());
058e97ec
VM
1372 ira_copies = NULL;
1373 ira_copies_num = 0;
1374}
1375
1376/* Return copy connecting A1 and A2 and originated from INSN of
1377 LOOP_TREE_NODE if any. */
1378static ira_copy_t
070a1983 1379find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx_insn *insn,
058e97ec
VM
1380 ira_loop_tree_node_t loop_tree_node)
1381{
1382 ira_copy_t cp, next_cp;
1383 ira_allocno_t another_a;
1384
1385 for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
1386 {
1387 if (cp->first == a1)
1388 {
1389 next_cp = cp->next_first_allocno_copy;
1390 another_a = cp->second;
1391 }
1392 else if (cp->second == a1)
1393 {
1394 next_cp = cp->next_second_allocno_copy;
1395 another_a = cp->first;
1396 }
1397 else
1398 gcc_unreachable ();
1399 if (another_a == a2 && cp->insn == insn
1400 && cp->loop_tree_node == loop_tree_node)
1401 return cp;
1402 }
1403 return NULL;
1404}
1405
1406/* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
548a6322 1407 SECOND, FREQ, CONSTRAINT_P, and INSN. */
058e97ec 1408ira_copy_t
548a6322 1409ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
070a1983 1410 bool constraint_p, rtx_insn *insn,
058e97ec
VM
1411 ira_loop_tree_node_t loop_tree_node)
1412{
1413 ira_copy_t cp;
1414
0b470bae 1415 cp = copy_pool.allocate ();
058e97ec
VM
1416 cp->num = ira_copies_num;
1417 cp->first = first;
1418 cp->second = second;
1419 cp->freq = freq;
548a6322 1420 cp->constraint_p = constraint_p;
058e97ec
VM
1421 cp->insn = insn;
1422 cp->loop_tree_node = loop_tree_node;
9771b263
DN
1423 copy_vec.safe_push (cp);
1424 ira_copies = copy_vec.address ();
1425 ira_copies_num = copy_vec.length ();
058e97ec
VM
1426 return cp;
1427}
1428
1429/* Attach a copy CP to allocnos involved into the copy. */
3b6d1699
VM
1430static void
1431add_allocno_copy_to_list (ira_copy_t cp)
058e97ec
VM
1432{
1433 ira_allocno_t first = cp->first, second = cp->second;
1434
1435 cp->prev_first_allocno_copy = NULL;
1436 cp->prev_second_allocno_copy = NULL;
1437 cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
1438 if (cp->next_first_allocno_copy != NULL)
1439 {
1440 if (cp->next_first_allocno_copy->first == first)
1441 cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
1442 else
1443 cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
1444 }
1445 cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
1446 if (cp->next_second_allocno_copy != NULL)
1447 {
1448 if (cp->next_second_allocno_copy->second == second)
1449 cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
1450 else
1451 cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
1452 }
1453 ALLOCNO_COPIES (first) = cp;
1454 ALLOCNO_COPIES (second) = cp;
1455}
1456
058e97ec
VM
1457/* Make a copy CP a canonical copy where number of the
1458 first allocno is less than the second one. */
3b6d1699
VM
1459static void
1460swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
058e97ec 1461{
058e97ec
VM
1462 if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
1463 return;
1464
fab27f52
MM
1465 std::swap (cp->first, cp->second);
1466 std::swap (cp->prev_first_allocno_copy, cp->prev_second_allocno_copy);
1467 std::swap (cp->next_first_allocno_copy, cp->next_second_allocno_copy);
058e97ec
VM
1468}
1469
1470/* Create (or update frequency if the copy already exists) and return
1471 the copy of allocnos FIRST and SECOND with frequency FREQ
1472 corresponding to move insn INSN (if any) and originated from
1473 LOOP_TREE_NODE. */
1474ira_copy_t
1475ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
070a1983 1476 bool constraint_p, rtx_insn *insn,
548a6322 1477 ira_loop_tree_node_t loop_tree_node)
058e97ec
VM
1478{
1479 ira_copy_t cp;
1480
1481 if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
1482 {
1483 cp->freq += freq;
1484 return cp;
1485 }
548a6322
VM
1486 cp = ira_create_copy (first, second, freq, constraint_p, insn,
1487 loop_tree_node);
058e97ec 1488 ira_assert (first != NULL && second != NULL);
3b6d1699
VM
1489 add_allocno_copy_to_list (cp);
1490 swap_allocno_copy_ends_if_necessary (cp);
058e97ec
VM
1491 return cp;
1492}
1493
4cda38d5
VM
1494/* Print info about copy CP into file F. */
1495static void
1496print_copy (FILE *f, ira_copy_t cp)
1497{
548a6322 1498 fprintf (f, " cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
4cda38d5 1499 ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
548a6322
VM
1500 ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
1501 cp->insn != NULL
1502 ? "move" : cp->constraint_p ? "constraint" : "shuffle");
4cda38d5
VM
1503}
1504
7b3b6ae4
LC
1505DEBUG_FUNCTION void
1506debug (ira_allocno_copy &ref)
1507{
1508 print_copy (stderr, &ref);
1509}
1510
1511DEBUG_FUNCTION void
1512debug (ira_allocno_copy *ptr)
1513{
1514 if (ptr)
1515 debug (*ptr);
1516 else
1517 fprintf (stderr, "<nil>\n");
1518}
1519
4cda38d5
VM
1520/* Print info about copy CP into stderr. */
1521void
1522ira_debug_copy (ira_copy_t cp)
1523{
1524 print_copy (stderr, cp);
1525}
1526
1527/* Print info about all copies into file F. */
1528static void
1529print_copies (FILE *f)
1530{
1531 ira_copy_t cp;
1532 ira_copy_iterator ci;
1533
1534 FOR_EACH_COPY (cp, ci)
1535 print_copy (f, cp);
1536}
1537
1538/* Print info about all copies into stderr. */
1539void
1540ira_debug_copies (void)
1541{
1542 print_copies (stderr);
1543}
1544
058e97ec
VM
1545/* Print info about copies involving allocno A into file F. */
1546static void
1547print_allocno_copies (FILE *f, ira_allocno_t a)
1548{
1549 ira_allocno_t another_a;
1550 ira_copy_t cp, next_cp;
1551
1552 fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
1553 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
1554 {
1555 if (cp->first == a)
1556 {
1557 next_cp = cp->next_first_allocno_copy;
1558 another_a = cp->second;
1559 }
1560 else if (cp->second == a)
1561 {
1562 next_cp = cp->next_second_allocno_copy;
1563 another_a = cp->first;
1564 }
1565 else
1566 gcc_unreachable ();
1567 fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
1568 ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
1569 }
1570 fprintf (f, "\n");
1571}
1572
7b3b6ae4
LC
1573DEBUG_FUNCTION void
1574debug (ira_allocno &ref)
1575{
1576 print_allocno_copies (stderr, &ref);
1577}
1578
1579DEBUG_FUNCTION void
1580debug (ira_allocno *ptr)
1581{
1582 if (ptr)
1583 debug (*ptr);
1584 else
1585 fprintf (stderr, "<nil>\n");
1586}
1587
1588
058e97ec
VM
1589/* Print info about copies involving allocno A into stderr. */
1590void
1591ira_debug_allocno_copies (ira_allocno_t a)
1592{
1593 print_allocno_copies (stderr, a);
1594}
1595
1596/* The function frees memory allocated for copy CP. */
1597static void
1598finish_copy (ira_copy_t cp)
1599{
0b470bae 1600 copy_pool.remove (cp);
058e97ec
VM
1601}
1602
1603
1604/* Free memory allocated for all copies. */
1605static void
1606finish_copies (void)
1607{
1608 ira_copy_t cp;
1609 ira_copy_iterator ci;
1610
1611 FOR_EACH_COPY (cp, ci)
1612 finish_copy (cp);
9771b263 1613 copy_vec.release ();
0b470bae 1614 copy_pool.release ();
058e97ec
VM
1615}
1616
1617\f
1618
1756cb66 1619/* Pools for cost vectors. It is defined only for allocno classes. */
3599f64a 1620static pool_allocator<int> * cost_vector_pool[N_REG_CLASSES];
058e97ec
VM
1621
1622/* The function initiates work with hard register cost vectors. It
1756cb66 1623 creates allocation pool for each allocno class. */
058e97ec
VM
1624static void
1625initiate_cost_vectors (void)
1626{
1627 int i;
1756cb66 1628 enum reg_class aclass;
058e97ec 1629
1756cb66 1630 for (i = 0; i < ira_allocno_classes_num; i++)
058e97ec 1631 {
1756cb66 1632 aclass = ira_allocno_classes[i];
3599f64a
ML
1633 cost_vector_pool[aclass] = new pool_allocator<int>
1634 ("cost vectors", 100,
1635 sizeof (int) * (ira_class_hard_regs_num[aclass] - 1));
058e97ec
VM
1636 }
1637}
1638
1756cb66 1639/* Allocate and return a cost vector VEC for ACLASS. */
058e97ec 1640int *
6f76a878 1641ira_allocate_cost_vector (reg_class_t aclass)
058e97ec 1642{
3599f64a 1643 return cost_vector_pool[(int) aclass]->allocate ();
058e97ec
VM
1644}
1645
1756cb66 1646/* Free a cost vector VEC for ACLASS. */
058e97ec 1647void
6f76a878 1648ira_free_cost_vector (int *vec, reg_class_t aclass)
058e97ec
VM
1649{
1650 ira_assert (vec != NULL);
3599f64a 1651 cost_vector_pool[(int) aclass]->remove (vec);
058e97ec
VM
1652}
1653
1654/* Finish work with hard register cost vectors. Release allocation
1756cb66 1655 pool for each allocno class. */
058e97ec
VM
1656static void
1657finish_cost_vectors (void)
1658{
1659 int i;
1756cb66 1660 enum reg_class aclass;
058e97ec 1661
1756cb66 1662 for (i = 0; i < ira_allocno_classes_num; i++)
058e97ec 1663 {
1756cb66 1664 aclass = ira_allocno_classes[i];
3599f64a 1665 delete cost_vector_pool[aclass];
058e97ec
VM
1666 }
1667}
1668
1669\f
1670
e6a7da82
SB
1671/* Compute a post-ordering of the reverse control flow of the loop body
1672 designated by the children nodes of LOOP_NODE, whose body nodes in
1673 pre-order are input as LOOP_PREORDER. Return a VEC with a post-order
1674 of the reverse loop body.
1675
1676 For the post-order of the reverse CFG, we visit the basic blocks in
1677 LOOP_PREORDER array in the reverse order of where they appear.
1678 This is important: We do not just want to compute a post-order of
1679 the reverse CFG, we want to make a best-guess for a visiting order that
1680 minimizes the number of chain elements per allocno live range. If the
1681 blocks would be visited in a different order, we would still compute a
1682 correct post-ordering but it would be less likely that two nodes
1683 connected by an edge in the CFG are neighbours in the topsort. */
1684
9771b263 1685static vec<ira_loop_tree_node_t>
e6a7da82 1686ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED,
9771b263 1687 vec<ira_loop_tree_node_t> loop_preorder)
e6a7da82 1688{
6e1aa848 1689 vec<ira_loop_tree_node_t> topsort_nodes = vNULL;
e6a7da82
SB
1690 unsigned int n_loop_preorder;
1691
9771b263 1692 n_loop_preorder = loop_preorder.length ();
e6a7da82
SB
1693 if (n_loop_preorder != 0)
1694 {
1695 ira_loop_tree_node_t subloop_node;
1696 unsigned int i;
ef062b13 1697 auto_vec<ira_loop_tree_node_t> dfs_stack;
e6a7da82
SB
1698
1699 /* This is a bit of strange abuse of the BB_VISITED flag: We use
1700 the flag to mark blocks we still have to visit to add them to
1701 our post-order. Define an alias to avoid confusion. */
1702#define BB_TO_VISIT BB_VISITED
1703
9771b263 1704 FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
e6a7da82
SB
1705 {
1706 gcc_checking_assert (! (subloop_node->bb->flags & BB_TO_VISIT));
1707 subloop_node->bb->flags |= BB_TO_VISIT;
1708 }
1709
9771b263
DN
1710 topsort_nodes.create (n_loop_preorder);
1711 dfs_stack.create (n_loop_preorder);
e6a7da82 1712
9771b263 1713 FOR_EACH_VEC_ELT_REVERSE (loop_preorder, i, subloop_node)
e6a7da82
SB
1714 {
1715 if (! (subloop_node->bb->flags & BB_TO_VISIT))
1716 continue;
1717
1718 subloop_node->bb->flags &= ~BB_TO_VISIT;
9771b263
DN
1719 dfs_stack.quick_push (subloop_node);
1720 while (! dfs_stack.is_empty ())
e6a7da82
SB
1721 {
1722 edge e;
1723 edge_iterator ei;
1724
9771b263 1725 ira_loop_tree_node_t n = dfs_stack.last ();
e6a7da82
SB
1726 FOR_EACH_EDGE (e, ei, n->bb->preds)
1727 {
1728 ira_loop_tree_node_t pred_node;
1729 basic_block pred_bb = e->src;
1730
fefa31b5 1731 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
e6a7da82
SB
1732 continue;
1733
1734 pred_node = IRA_BB_NODE_BY_INDEX (pred_bb->index);
1735 if (pred_node != n
1736 && (pred_node->bb->flags & BB_TO_VISIT))
1737 {
1738 pred_node->bb->flags &= ~BB_TO_VISIT;
9771b263 1739 dfs_stack.quick_push (pred_node);
e6a7da82
SB
1740 }
1741 }
9771b263 1742 if (n == dfs_stack.last ())
e6a7da82 1743 {
9771b263
DN
1744 dfs_stack.pop ();
1745 topsort_nodes.quick_push (n);
e6a7da82
SB
1746 }
1747 }
1748 }
1749
1750#undef BB_TO_VISIT
e6a7da82
SB
1751 }
1752
9771b263 1753 gcc_assert (topsort_nodes.length () == n_loop_preorder);
e6a7da82
SB
1754 return topsort_nodes;
1755}
1756
058e97ec
VM
1757/* The current loop tree node and its regno allocno map. */
1758ira_loop_tree_node_t ira_curr_loop_tree_node;
1759ira_allocno_t *ira_curr_regno_allocno_map;
1760
1761/* This recursive function traverses loop tree with root LOOP_NODE
1762 calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
1763 correspondingly in preorder and postorder. The function sets up
1764 IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP. If BB_P,
1765 basic block nodes of LOOP_NODE is also processed (before its
e6a7da82
SB
1766 subloop nodes).
1767
1768 If BB_P is set and POSTORDER_FUNC is given, the basic blocks in
1769 the loop are passed in the *reverse* post-order of the *reverse*
1770 CFG. This is only used by ira_create_allocno_live_ranges, which
1771 wants to visit basic blocks in this order to minimize the number
1772 of elements per live range chain.
1773 Note that the loop tree nodes are still visited in the normal,
1774 forward post-order of the loop tree. */
1775
058e97ec
VM
1776void
1777ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
1778 void (*preorder_func) (ira_loop_tree_node_t),
1779 void (*postorder_func) (ira_loop_tree_node_t))
1780{
1781 ira_loop_tree_node_t subloop_node;
1782
1783 ira_assert (loop_node->bb == NULL);
1784 ira_curr_loop_tree_node = loop_node;
1785 ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;
1786
1787 if (preorder_func != NULL)
1788 (*preorder_func) (loop_node);
b8698a0f 1789
058e97ec 1790 if (bb_p)
e6a7da82 1791 {
ef062b13 1792 auto_vec<ira_loop_tree_node_t> loop_preorder;
e6a7da82
SB
1793 unsigned int i;
1794
1795 /* Add all nodes to the set of nodes to visit. The IRA loop tree
1796 is set up such that nodes in the loop body appear in a pre-order
1797 of their place in the CFG. */
1798 for (subloop_node = loop_node->children;
1799 subloop_node != NULL;
1800 subloop_node = subloop_node->next)
1801 if (subloop_node->bb != NULL)
9771b263 1802 loop_preorder.safe_push (subloop_node);
e6a7da82
SB
1803
1804 if (preorder_func != NULL)
9771b263 1805 FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
e6a7da82
SB
1806 (*preorder_func) (subloop_node);
1807
1808 if (postorder_func != NULL)
058e97ec 1809 {
9771b263 1810 vec<ira_loop_tree_node_t> loop_rev_postorder =
e6a7da82 1811 ira_loop_tree_body_rev_postorder (loop_node, loop_preorder);
9771b263 1812 FOR_EACH_VEC_ELT_REVERSE (loop_rev_postorder, i, subloop_node)
058e97ec 1813 (*postorder_func) (subloop_node);
9771b263 1814 loop_rev_postorder.release ();
058e97ec 1815 }
e6a7da82
SB
1816 }
1817
058e97ec
VM
1818 for (subloop_node = loop_node->subloops;
1819 subloop_node != NULL;
1820 subloop_node = subloop_node->subloop_next)
1821 {
1822 ira_assert (subloop_node->bb == NULL);
1823 ira_traverse_loop_tree (bb_p, subloop_node,
1824 preorder_func, postorder_func);
1825 }
1826
1827 ira_curr_loop_tree_node = loop_node;
1828 ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;
1829
1830 if (postorder_func != NULL)
1831 (*postorder_func) (loop_node);
1832}
1833
1834\f
1835
1836/* The basic block currently being processed. */
1837static basic_block curr_bb;
1838
1839/* This recursive function creates allocnos corresponding to
1840 pseudo-registers containing in X. True OUTPUT_P means that X is
d1bb282e 1841 an lvalue. PARENT corresponds to the parent expression of X. */
058e97ec 1842static void
d1bb282e 1843create_insn_allocnos (rtx x, rtx outer, bool output_p)
058e97ec
VM
1844{
1845 int i, j;
1846 const char *fmt;
1847 enum rtx_code code = GET_CODE (x);
1848
1849 if (code == REG)
1850 {
1851 int regno;
1852
1853 if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
1854 {
1855 ira_allocno_t a;
1856
1857 if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
d1bb282e
DS
1858 {
1859 a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);
1860 if (outer != NULL && GET_CODE (outer) == SUBREG)
1861 {
ef4bddc2 1862 machine_mode wmode = GET_MODE (outer);
d1bb282e
DS
1863 if (GET_MODE_SIZE (wmode) > GET_MODE_SIZE (ALLOCNO_WMODE (a)))
1864 ALLOCNO_WMODE (a) = wmode;
1865 }
1866 }
b8698a0f 1867
058e97ec
VM
1868 ALLOCNO_NREFS (a)++;
1869 ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
058e97ec
VM
1870 if (output_p)
1871 bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
1872 }
1873 return;
1874 }
1875 else if (code == SET)
1876 {
d1bb282e
DS
1877 create_insn_allocnos (SET_DEST (x), NULL, true);
1878 create_insn_allocnos (SET_SRC (x), NULL, false);
058e97ec
VM
1879 return;
1880 }
1881 else if (code == CLOBBER)
1882 {
d1bb282e 1883 create_insn_allocnos (XEXP (x, 0), NULL, true);
058e97ec
VM
1884 return;
1885 }
1886 else if (code == MEM)
1887 {
d1bb282e 1888 create_insn_allocnos (XEXP (x, 0), NULL, false);
058e97ec
VM
1889 return;
1890 }
b8698a0f 1891 else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
058e97ec
VM
1892 code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
1893 {
d1bb282e
DS
1894 create_insn_allocnos (XEXP (x, 0), NULL, true);
1895 create_insn_allocnos (XEXP (x, 0), NULL, false);
058e97ec
VM
1896 return;
1897 }
1898
1899 fmt = GET_RTX_FORMAT (code);
1900 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1901 {
1902 if (fmt[i] == 'e')
d1bb282e 1903 create_insn_allocnos (XEXP (x, i), x, output_p);
058e97ec
VM
1904 else if (fmt[i] == 'E')
1905 for (j = 0; j < XVECLEN (x, i); j++)
d1bb282e 1906 create_insn_allocnos (XVECEXP (x, i, j), x, output_p);
058e97ec
VM
1907 }
1908}
1909
1910/* Create allocnos corresponding to pseudo-registers living in the
1911 basic block represented by the corresponding loop tree node
1912 BB_NODE. */
1913static void
1914create_bb_allocnos (ira_loop_tree_node_t bb_node)
1915{
1916 basic_block bb;
070a1983 1917 rtx_insn *insn;
058e97ec
VM
1918 unsigned int i;
1919 bitmap_iterator bi;
1920
1921 curr_bb = bb = bb_node->bb;
1922 ira_assert (bb != NULL);
acb37d29 1923 FOR_BB_INSNS_REVERSE (bb, insn)
b5b8b0ac 1924 if (NONDEBUG_INSN_P (insn))
d1bb282e 1925 create_insn_allocnos (PATTERN (insn), NULL, false);
058e97ec
VM
1926 /* It might be a allocno living through from one subloop to
1927 another. */
bf744527 1928 EXECUTE_IF_SET_IN_REG_SET (df_get_live_in (bb), FIRST_PSEUDO_REGISTER, i, bi)
058e97ec
VM
1929 if (ira_curr_regno_allocno_map[i] == NULL)
1930 ira_create_allocno (i, false, ira_curr_loop_tree_node);
1931}
1932
1933/* Create allocnos corresponding to pseudo-registers living on edge E
1934 (a loop entry or exit). Also mark the allocnos as living on the
1935 loop border. */
1936static void
1937create_loop_allocnos (edge e)
1938{
1939 unsigned int i;
1940 bitmap live_in_regs, border_allocnos;
1941 bitmap_iterator bi;
1942 ira_loop_tree_node_t parent;
1943
bf744527 1944 live_in_regs = df_get_live_in (e->dest);
058e97ec 1945 border_allocnos = ira_curr_loop_tree_node->border_allocnos;
bf744527 1946 EXECUTE_IF_SET_IN_REG_SET (df_get_live_out (e->src),
058e97ec
VM
1947 FIRST_PSEUDO_REGISTER, i, bi)
1948 if (bitmap_bit_p (live_in_regs, i))
1949 {
1950 if (ira_curr_regno_allocno_map[i] == NULL)
1951 {
1952 /* The order of creations is important for right
1953 ira_regno_allocno_map. */
1954 if ((parent = ira_curr_loop_tree_node->parent) != NULL
1955 && parent->regno_allocno_map[i] == NULL)
1956 ira_create_allocno (i, false, parent);
1957 ira_create_allocno (i, false, ira_curr_loop_tree_node);
1958 }
1959 bitmap_set_bit (border_allocnos,
1960 ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
1961 }
1962}
1963
1964/* Create allocnos corresponding to pseudo-registers living in loop
1965 represented by the corresponding loop tree node LOOP_NODE. This
1966 function is called by ira_traverse_loop_tree. */
1967static void
1968create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
1969{
1970 if (loop_node->bb != NULL)
1971 create_bb_allocnos (loop_node);
1972 else if (loop_node != ira_loop_tree_root)
1973 {
1974 int i;
1975 edge_iterator ei;
1976 edge e;
9771b263 1977 vec<edge> edges;
058e97ec 1978
2608d841 1979 ira_assert (current_loops != NULL);
058e97ec
VM
1980 FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
1981 if (e->src != loop_node->loop->latch)
1982 create_loop_allocnos (e);
b8698a0f 1983
058e97ec 1984 edges = get_loop_exit_edges (loop_node->loop);
9771b263 1985 FOR_EACH_VEC_ELT (edges, i, e)
058e97ec 1986 create_loop_allocnos (e);
9771b263 1987 edges.release ();
058e97ec
VM
1988 }
1989}
1990
1991/* Propagate information about allocnos modified inside the loop given
1992 by its LOOP_TREE_NODE to its parent. */
1993static void
1994propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
1995{
1996 if (loop_tree_node == ira_loop_tree_root)
1997 return;
1998 ira_assert (loop_tree_node->bb == NULL);
1999 bitmap_ior_into (loop_tree_node->parent->modified_regnos,
2000 loop_tree_node->modified_regnos);
2001}
2002
2003/* Propagate new info about allocno A (see comments about accumulated
2004 info in allocno definition) to the corresponding allocno on upper
2005 loop tree level. So allocnos on upper levels accumulate
2006 information about the corresponding allocnos in nested regions.
2007 The new info means allocno info finally calculated in this
2008 file. */
2009static void
2010propagate_allocno_info (void)
2011{
2012 int i;
2013 ira_allocno_t a, parent_a;
2014 ira_loop_tree_node_t parent;
1756cb66 2015 enum reg_class aclass;
058e97ec 2016
7db7ed3c
VM
2017 if (flag_ira_region != IRA_REGION_ALL
2018 && flag_ira_region != IRA_REGION_MIXED)
058e97ec
VM
2019 return;
2020 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2021 for (a = ira_regno_allocno_map[i];
2022 a != NULL;
2023 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2024 if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
2025 && (parent_a = parent->regno_allocno_map[i]) != NULL
2026 /* There are no caps yet at this point. So use
2027 border_allocnos to find allocnos for the propagation. */
2028 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
2029 ALLOCNO_NUM (a)))
2030 {
927425df
VM
2031 if (! ALLOCNO_BAD_SPILL_P (a))
2032 ALLOCNO_BAD_SPILL_P (parent_a) = false;
058e97ec
VM
2033 ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
2034 ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
2035 ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
3c55880a 2036 merge_hard_reg_conflicts (a, parent_a, true);
058e97ec
VM
2037 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
2038 += ALLOCNO_CALLS_CROSSED_NUM (a);
e384e6b5
BS
2039 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
2040 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
c2ba7e7a
RO
2041 IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a),
2042 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
058e97ec
VM
2043 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
2044 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
1756cb66
VM
2045 aclass = ALLOCNO_CLASS (a);
2046 ira_assert (aclass == ALLOCNO_CLASS (parent_a));
058e97ec 2047 ira_allocate_and_accumulate_costs
1756cb66 2048 (&ALLOCNO_HARD_REG_COSTS (parent_a), aclass,
058e97ec
VM
2049 ALLOCNO_HARD_REG_COSTS (a));
2050 ira_allocate_and_accumulate_costs
2051 (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
1756cb66 2052 aclass,
058e97ec 2053 ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
1756cb66
VM
2054 ALLOCNO_CLASS_COST (parent_a)
2055 += ALLOCNO_CLASS_COST (a);
058e97ec 2056 ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
058e97ec
VM
2057 }
2058}
2059
2060/* Create allocnos corresponding to pseudo-registers in the current
2061 function. Traverse the loop tree for this. */
2062static void
2063create_allocnos (void)
2064{
2065 /* We need to process BB first to correctly link allocnos by member
2066 next_regno_allocno. */
2067 ira_traverse_loop_tree (true, ira_loop_tree_root,
2068 create_loop_tree_node_allocnos, NULL);
2069 if (optimize)
2070 ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
2071 propagate_modified_regnos);
2072}
2073
2074\f
2075
2076/* The page contains function to remove some regions from a separate
2077 register allocation. We remove regions whose separate allocation
2078 will hardly improve the result. As a result we speed up regional
2079 register allocation. */
2080
9140d27b 2081/* The function changes the object in range list given by R to OBJ. */
058e97ec 2082static void
9140d27b 2083change_object_in_range_list (live_range_t r, ira_object_t obj)
058e97ec
VM
2084{
2085 for (; r != NULL; r = r->next)
9140d27b 2086 r->object = obj;
058e97ec
VM
2087}
2088
3c55880a
BS
2089/* Move all live ranges associated with allocno FROM to allocno TO. */
2090static void
2091move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
2092{
ac0ab4f7
BS
2093 int i;
2094 int n = ALLOCNO_NUM_OBJECTS (from);
2095
2096 gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
3c55880a 2097
ac0ab4f7 2098 for (i = 0; i < n; i++)
3c55880a 2099 {
ac0ab4f7
BS
2100 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
2101 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
2102 live_range_t lr = OBJECT_LIVE_RANGES (from_obj);
2103
2104 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2105 {
2106 fprintf (ira_dump_file,
2107 " Moving ranges of a%dr%d to a%dr%d: ",
2108 ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
2109 ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
2110 ira_print_live_range_list (ira_dump_file, lr);
2111 }
2112 change_object_in_range_list (lr, to_obj);
2113 OBJECT_LIVE_RANGES (to_obj)
2114 = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
2115 OBJECT_LIVE_RANGES (from_obj) = NULL;
3c55880a 2116 }
3c55880a
BS
2117}
2118
3c55880a
BS
2119static void
2120copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
2121{
ac0ab4f7
BS
2122 int i;
2123 int n = ALLOCNO_NUM_OBJECTS (from);
3c55880a 2124
ac0ab4f7
BS
2125 gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
2126
2127 for (i = 0; i < n; i++)
3c55880a 2128 {
ac0ab4f7
BS
2129 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
2130 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
2131 live_range_t lr = OBJECT_LIVE_RANGES (from_obj);
2132
2133 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2134 {
2135 fprintf (ira_dump_file, " Copying ranges of a%dr%d to a%dr%d: ",
2136 ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
2137 ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
2138 ira_print_live_range_list (ira_dump_file, lr);
2139 }
2140 lr = ira_copy_live_range_list (lr);
2141 change_object_in_range_list (lr, to_obj);
2142 OBJECT_LIVE_RANGES (to_obj)
2143 = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
3c55880a 2144 }
3c55880a
BS
2145}
2146
058e97ec
VM
2147/* Return TRUE if NODE represents a loop with low register
2148 pressure. */
2149static bool
2150low_pressure_loop_node_p (ira_loop_tree_node_t node)
2151{
2152 int i;
1756cb66 2153 enum reg_class pclass;
b8698a0f 2154
058e97ec
VM
2155 if (node->bb != NULL)
2156 return false;
b8698a0f 2157
1756cb66 2158 for (i = 0; i < ira_pressure_classes_num; i++)
058e97ec 2159 {
1756cb66 2160 pclass = ira_pressure_classes[i];
f508f827
RS
2161 if (node->reg_pressure[pclass] > ira_class_hard_regs_num[pclass]
2162 && ira_class_hard_regs_num[pclass] > 1)
058e97ec
VM
2163 return false;
2164 }
2165 return true;
2166}
2167
#ifdef STACK_REGS
/* Return TRUE if LOOP has a complex enter or exit edge.  We don't
   form a region from such loop if the target use stack register
   because reg-stack.c can not deal with such edges.  */
static bool
loop_with_complex_edge_p (struct loop *loop)
{
  int i;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  bool res;

  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if (e->flags & EDGE_EH)
      return true;
  edges = get_loop_exit_edges (loop);
  res = false;
  FOR_EACH_VEC_ELT (edges, i, e)
    if (e->flags & EDGE_COMPLEX)
      {
	res = true;
	break;
      }
  /* Release the edge vector on all paths to avoid a leak.  */
  edges.release ();
  return res;
}
#endif
8c5fdaae 2196
30ea859e
VM
2197/* Sort loops for marking them for removal. We put already marked
2198 loops first, then less frequent loops next, and then outer loops
2199 next. */
2200static int
2201loop_compare_func (const void *v1p, const void *v2p)
2202{
2203 int diff;
2204 ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
2205 ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;
2206
2207 ira_assert (l1->parent != NULL && l2->parent != NULL);
2208 if (l1->to_remove_p && ! l2->to_remove_p)
2209 return -1;
2210 if (! l1->to_remove_p && l2->to_remove_p)
2211 return 1;
2212 if ((diff = l1->loop->header->frequency - l2->loop->header->frequency) != 0)
2213 return diff;
2214 if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
2215 return diff;
2216 /* Make sorting stable. */
2608d841 2217 return l1->loop_num - l2->loop_num;
30ea859e
VM
2218}
2219
30ea859e
VM
2220/* Mark loops which should be removed from regional allocation. We
2221 remove a loop with low register pressure inside another loop with
2222 register pressure. In this case a separate allocation of the loop
2223 hardly helps (for irregular register file architecture it could
2224 help by choosing a better hard register in the loop but we prefer
2225 faster allocation even in this case). We also remove cheap loops
8c5fdaae
VM
2226 if there are more than IRA_MAX_LOOPS_NUM of them. Loop with EH
2227 exit or enter edges are removed too because the allocation might
2228 require put pseudo moves on the EH edges (we could still do this
2229 for pseudos with caller saved hard registers in some cases but it
2230 is impossible to say here or during top-down allocation pass what
2231 hard register the pseudos get finally). */
30ea859e
VM
2232static void
2233mark_loops_for_removal (void)
058e97ec 2234{
30ea859e
VM
2235 int i, n;
2236 ira_loop_tree_node_t *sorted_loops;
2237 loop_p loop;
2238
2608d841 2239 ira_assert (current_loops != NULL);
30ea859e
VM
2240 sorted_loops
2241 = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
0fc822d0
RB
2242 * number_of_loops (cfun));
2243 for (n = i = 0; vec_safe_iterate (get_loops (cfun), i, &loop); i++)
30ea859e
VM
2244 if (ira_loop_nodes[i].regno_allocno_map != NULL)
2245 {
2246 if (ira_loop_nodes[i].parent == NULL)
2247 {
2248 /* Don't remove the root. */
2249 ira_loop_nodes[i].to_remove_p = false;
2250 continue;
2251 }
2252 sorted_loops[n++] = &ira_loop_nodes[i];
2253 ira_loop_nodes[i].to_remove_p
8c5fdaae
VM
2254 = ((low_pressure_loop_node_p (ira_loop_nodes[i].parent)
2255 && low_pressure_loop_node_p (&ira_loop_nodes[i]))
30a435d8
VM
2256#ifdef STACK_REGS
2257 || loop_with_complex_edge_p (ira_loop_nodes[i].loop)
2258#endif
2259 );
30ea859e
VM
2260 }
2261 qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
2262 for (i = 0; n - i + 1 > IRA_MAX_LOOPS_NUM; i++)
2263 {
2264 sorted_loops[i]->to_remove_p = true;
2265 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2266 fprintf
2267 (ira_dump_file,
2268 " Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
2608d841 2269 sorted_loops[i]->loop_num, sorted_loops[i]->loop->header->index,
30ea859e
VM
2270 sorted_loops[i]->loop->header->frequency,
2271 loop_depth (sorted_loops[i]->loop),
2272 low_pressure_loop_node_p (sorted_loops[i]->parent)
2273 && low_pressure_loop_node_p (sorted_loops[i])
2274 ? "low pressure" : "cheap loop");
2275 }
2276 ira_free (sorted_loops);
058e97ec
VM
2277}
2278
311aab06
VM
2279/* Mark all loops but root for removing. */
2280static void
2281mark_all_loops_for_removal (void)
2282{
2283 int i;
2284 loop_p loop;
2285
2608d841 2286 ira_assert (current_loops != NULL);
0fc822d0 2287 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
311aab06
VM
2288 if (ira_loop_nodes[i].regno_allocno_map != NULL)
2289 {
2290 if (ira_loop_nodes[i].parent == NULL)
2291 {
2292 /* Don't remove the root. */
2293 ira_loop_nodes[i].to_remove_p = false;
2294 continue;
2295 }
2296 ira_loop_nodes[i].to_remove_p = true;
2297 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2298 fprintf
2299 (ira_dump_file,
2300 " Mark loop %d (header %d, freq %d, depth %d) for removal\n",
2608d841 2301 ira_loop_nodes[i].loop_num,
311aab06
VM
2302 ira_loop_nodes[i].loop->header->index,
2303 ira_loop_nodes[i].loop->header->frequency,
2304 loop_depth (ira_loop_nodes[i].loop));
2305 }
2306}
30ea859e 2307
058e97ec 2308/* Definition of vector of loop tree nodes. */
058e97ec
VM
2309
2310/* Vec containing references to all removed loop tree nodes. */
9771b263 2311static vec<ira_loop_tree_node_t> removed_loop_vec;
058e97ec
VM
2312
2313/* Vec containing references to all children of loop tree nodes. */
9771b263 2314static vec<ira_loop_tree_node_t> children_vec;
058e97ec
VM
2315
2316/* Remove subregions of NODE if their separate allocation will not
2317 improve the result. */
2318static void
2319remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
2320{
2321 unsigned int start;
2322 bool remove_p;
2323 ira_loop_tree_node_t subnode;
2324
30ea859e 2325 remove_p = node->to_remove_p;
058e97ec 2326 if (! remove_p)
9771b263
DN
2327 children_vec.safe_push (node);
2328 start = children_vec.length ();
058e97ec
VM
2329 for (subnode = node->children; subnode != NULL; subnode = subnode->next)
2330 if (subnode->bb == NULL)
2331 remove_uneccesary_loop_nodes_from_loop_tree (subnode);
2332 else
9771b263 2333 children_vec.safe_push (subnode);
058e97ec
VM
2334 node->children = node->subloops = NULL;
2335 if (remove_p)
2336 {
9771b263 2337 removed_loop_vec.safe_push (node);
058e97ec
VM
2338 return;
2339 }
9771b263 2340 while (children_vec.length () > start)
058e97ec 2341 {
9771b263 2342 subnode = children_vec.pop ();
058e97ec
VM
2343 subnode->parent = node;
2344 subnode->next = node->children;
2345 node->children = subnode;
2346 if (subnode->bb == NULL)
2347 {
2348 subnode->subloop_next = node->subloops;
2349 node->subloops = subnode;
2350 }
2351 }
2352}
2353
c6bb4c93
VM
2354/* Return TRUE if NODE is inside PARENT. */
2355static bool
2356loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
2357{
2358 for (node = node->parent; node != NULL; node = node->parent)
2359 if (node == parent)
2360 return true;
2361 return false;
2362}
2363
2364/* Sort allocnos according to their order in regno allocno list. */
2365static int
2366regno_allocno_order_compare_func (const void *v1p, const void *v2p)
2367{
2368 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2369 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2370 ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
2371 ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);
2372
2373 if (loop_is_inside_p (n1, n2))
2374 return -1;
2375 else if (loop_is_inside_p (n2, n1))
2376 return 1;
2377 /* If allocnos are equally good, sort by allocno numbers, so that
2378 the results of qsort leave nothing to chance. We put allocnos
2379 with higher number first in the list because it is the original
2380 order for allocnos from loops on the same levels. */
2381 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
2382}
2383
2384/* This array is used to sort allocnos to restore allocno order in
2385 the regno allocno list. */
2386static ira_allocno_t *regno_allocnos;
2387
2388/* Restore allocno order for REGNO in the regno allocno list. */
2389static void
2390ira_rebuild_regno_allocno_list (int regno)
2391{
2392 int i, n;
2393 ira_allocno_t a;
2394
2395 for (n = 0, a = ira_regno_allocno_map[regno];
2396 a != NULL;
2397 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2398 regno_allocnos[n++] = a;
2399 ira_assert (n > 0);
b8698a0f 2400 qsort (regno_allocnos, n, sizeof (ira_allocno_t),
c6bb4c93
VM
2401 regno_allocno_order_compare_func);
2402 for (i = 1; i < n; i++)
2403 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
2404 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
2405 ira_regno_allocno_map[regno] = regno_allocnos[0];
2406 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2407 fprintf (ira_dump_file, " Rebuilding regno allocno list for %d\n", regno);
2408}
2409
311aab06
VM
2410/* Propagate info from allocno FROM_A to allocno A. */
2411static void
2412propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
2413{
1756cb66 2414 enum reg_class aclass;
311aab06 2415
3c55880a 2416 merge_hard_reg_conflicts (from_a, a, false);
311aab06
VM
2417 ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
2418 ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
2419 ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
311aab06 2420 ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
e384e6b5
BS
2421 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)
2422 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a);
c2ba7e7a
RO
2423 IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a),
2424 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (from_a));
2425
311aab06
VM
2426 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
2427 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
2428 if (! ALLOCNO_BAD_SPILL_P (from_a))
2429 ALLOCNO_BAD_SPILL_P (a) = false;
1756cb66
VM
2430 aclass = ALLOCNO_CLASS (from_a);
2431 ira_assert (aclass == ALLOCNO_CLASS (a));
2432 ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), aclass,
311aab06
VM
2433 ALLOCNO_HARD_REG_COSTS (from_a));
2434 ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
1756cb66 2435 aclass,
311aab06 2436 ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
1756cb66 2437 ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
311aab06
VM
2438 ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
2439}
2440
058e97ec
VM
2441/* Remove allocnos from loops removed from the allocation
2442 consideration. */
2443static void
2444remove_unnecessary_allocnos (void)
2445{
2446 int regno;
c6bb4c93 2447 bool merged_p, rebuild_p;
058e97ec
VM
2448 ira_allocno_t a, prev_a, next_a, parent_a;
2449 ira_loop_tree_node_t a_node, parent;
058e97ec
VM
2450
2451 merged_p = false;
c6bb4c93 2452 regno_allocnos = NULL;
058e97ec 2453 for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
c6bb4c93
VM
2454 {
2455 rebuild_p = false;
2456 for (prev_a = NULL, a = ira_regno_allocno_map[regno];
2457 a != NULL;
2458 a = next_a)
2459 {
2460 next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
2461 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2462 if (! a_node->to_remove_p)
2463 prev_a = a;
2464 else
2465 {
2466 for (parent = a_node->parent;
2467 (parent_a = parent->regno_allocno_map[regno]) == NULL
2468 && parent->to_remove_p;
2469 parent = parent->parent)
2470 ;
2471 if (parent_a == NULL)
2472 {
311aab06
VM
2473 /* There are no allocnos with the same regno in
2474 upper region -- just move the allocno to the
2475 upper region. */
c6bb4c93
VM
2476 prev_a = a;
2477 ALLOCNO_LOOP_TREE_NODE (a) = parent;
2478 parent->regno_allocno_map[regno] = a;
2479 bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
2480 rebuild_p = true;
2481 }
2482 else
2483 {
2484 /* Remove the allocno and update info of allocno in
2485 the upper region. */
2486 if (prev_a == NULL)
2487 ira_regno_allocno_map[regno] = next_a;
2488 else
2489 ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
3c55880a 2490 move_allocno_live_ranges (a, parent_a);
c6bb4c93 2491 merged_p = true;
311aab06 2492 propagate_some_info_from_allocno (parent_a, a);
46044dd9
L
2493 /* Remove it from the corresponding regno allocno
2494 map to avoid info propagation of subsequent
2495 allocno into this already removed allocno. */
2496 a_node->regno_allocno_map[regno] = NULL;
3b6d1699 2497 ira_remove_allocno_prefs (a);
c6bb4c93
VM
2498 finish_allocno (a);
2499 }
2500 }
2501 }
2502 if (rebuild_p)
2503 /* We need to restore the order in regno allocno list. */
2504 {
2505 if (regno_allocnos == NULL)
2506 regno_allocnos
2507 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
2508 * ira_allocnos_num);
2509 ira_rebuild_regno_allocno_list (regno);
2510 }
2511 }
058e97ec
VM
2512 if (merged_p)
2513 ira_rebuild_start_finish_chains ();
c6bb4c93
VM
2514 if (regno_allocnos != NULL)
2515 ira_free (regno_allocnos);
058e97ec
VM
2516}
2517
311aab06 2518/* Remove allocnos from all loops but the root. */
058e97ec 2519static void
311aab06 2520remove_low_level_allocnos (void)
058e97ec 2521{
311aab06
VM
2522 int regno;
2523 bool merged_p, propagate_p;
2524 ira_allocno_t a, top_a;
2525 ira_loop_tree_node_t a_node, parent;
311aab06
VM
2526 ira_allocno_iterator ai;
2527
2528 merged_p = false;
2529 FOR_EACH_ALLOCNO (a, ai)
2530 {
2531 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2532 if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
2533 continue;
2534 regno = ALLOCNO_REGNO (a);
2535 if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
2536 {
2537 ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
2538 ira_loop_tree_root->regno_allocno_map[regno] = a;
2539 continue;
2540 }
2541 propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
2542 /* Remove the allocno and update info of allocno in the upper
2543 region. */
3c55880a 2544 move_allocno_live_ranges (a, top_a);
311aab06 2545 merged_p = true;
311aab06
VM
2546 if (propagate_p)
2547 propagate_some_info_from_allocno (top_a, a);
2548 }
2549 FOR_EACH_ALLOCNO (a, ai)
2550 {
2551 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2552 if (a_node == ira_loop_tree_root)
2553 continue;
2554 parent = a_node->parent;
2555 regno = ALLOCNO_REGNO (a);
2556 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2557 ira_assert (ALLOCNO_CAP (a) != NULL);
2558 else if (ALLOCNO_CAP (a) == NULL)
2559 ira_assert (parent->regno_allocno_map[regno] != NULL);
2560 }
2561 FOR_EACH_ALLOCNO (a, ai)
2562 {
2563 regno = ALLOCNO_REGNO (a);
2564 if (ira_loop_tree_root->regno_allocno_map[regno] == a)
2565 {
ac0ab4f7
BS
2566 ira_object_t obj;
2567 ira_allocno_object_iterator oi;
a49ae217 2568
311aab06
VM
2569 ira_regno_allocno_map[regno] = a;
2570 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
2571 ALLOCNO_CAP_MEMBER (a) = NULL;
ac0ab4f7
BS
2572 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2573 COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
2574 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
311aab06
VM
2575#ifdef STACK_REGS
2576 if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
2577 ALLOCNO_NO_STACK_REG_P (a) = true;
2578#endif
2579 }
2580 else
3b6d1699
VM
2581 {
2582 ira_remove_allocno_prefs (a);
2583 finish_allocno (a);
2584 }
311aab06
VM
2585 }
2586 if (merged_p)
2587 ira_rebuild_start_finish_chains ();
2588}
2589
2590/* Remove loops from consideration. We remove all loops except for
2591 root if ALL_P or loops for which a separate allocation will not
2592 improve the result. We have to do this after allocno creation and
1756cb66
VM
2593 their costs and allocno class evaluation because only after that
2594 the register pressure can be known and is calculated. */
311aab06
VM
2595static void
2596remove_unnecessary_regions (bool all_p)
2597{
2608d841
VM
2598 if (current_loops == NULL)
2599 return;
311aab06
VM
2600 if (all_p)
2601 mark_all_loops_for_removal ();
2602 else
2603 mark_loops_for_removal ();
8b1c6fd7
DM
2604 children_vec.create (last_basic_block_for_fn (cfun)
2605 + number_of_loops (cfun));
2606 removed_loop_vec.create (last_basic_block_for_fn (cfun)
2607 + number_of_loops (cfun));
9771b263
DN
2608 remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root);
2609 children_vec.release ();
311aab06
VM
2610 if (all_p)
2611 remove_low_level_allocnos ();
2612 else
2613 remove_unnecessary_allocnos ();
9771b263
DN
2614 while (removed_loop_vec.length () > 0)
2615 finish_loop_tree_node (removed_loop_vec.pop ());
2616 removed_loop_vec.release ();
058e97ec
VM
2617}
2618
2619\f
2620
927425df
VM
2621/* At this point true value of allocno attribute bad_spill_p means
2622 that there is an insn where allocno occurs and where the allocno
2623 can not be used as memory. The function updates the attribute, now
2624 it can be true only for allocnos which can not be used as memory in
2625 an insn and in whose live ranges there is other allocno deaths.
2626 Spilling allocnos with true value will not improve the code because
2627 it will not make other allocnos colorable and additional reloads
2628 for the corresponding pseudo will be generated in reload pass for
2629 each insn it occurs.
2630
2631 This is a trick mentioned in one classic article of Chaitin etc
2632 which is frequently omitted in other implementations of RA based on
2633 graph coloring. */
2634static void
2635update_bad_spill_attribute (void)
2636{
2637 int i;
2638 ira_allocno_t a;
2639 ira_allocno_iterator ai;
ac0ab4f7
BS
2640 ira_allocno_object_iterator aoi;
2641 ira_object_t obj;
b14151b5 2642 live_range_t r;
1756cb66 2643 enum reg_class aclass;
927425df
VM
2644 bitmap_head dead_points[N_REG_CLASSES];
2645
1756cb66 2646 for (i = 0; i < ira_allocno_classes_num; i++)
927425df 2647 {
1756cb66
VM
2648 aclass = ira_allocno_classes[i];
2649 bitmap_initialize (&dead_points[aclass], &reg_obstack);
927425df
VM
2650 }
2651 FOR_EACH_ALLOCNO (a, ai)
2652 {
1756cb66
VM
2653 aclass = ALLOCNO_CLASS (a);
2654 if (aclass == NO_REGS)
927425df 2655 continue;
ac0ab4f7
BS
2656 FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
2657 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
1756cb66 2658 bitmap_set_bit (&dead_points[aclass], r->finish);
927425df
VM
2659 }
2660 FOR_EACH_ALLOCNO (a, ai)
2661 {
1756cb66
VM
2662 aclass = ALLOCNO_CLASS (a);
2663 if (aclass == NO_REGS)
927425df
VM
2664 continue;
2665 if (! ALLOCNO_BAD_SPILL_P (a))
2666 continue;
ac0ab4f7 2667 FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
927425df 2668 {
ac0ab4f7
BS
2669 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2670 {
2671 for (i = r->start + 1; i < r->finish; i++)
1756cb66 2672 if (bitmap_bit_p (&dead_points[aclass], i))
ac0ab4f7
BS
2673 break;
2674 if (i < r->finish)
2675 break;
2676 }
2677 if (r != NULL)
2678 {
2679 ALLOCNO_BAD_SPILL_P (a) = false;
927425df 2680 break;
ac0ab4f7 2681 }
927425df 2682 }
927425df 2683 }
1756cb66 2684 for (i = 0; i < ira_allocno_classes_num; i++)
927425df 2685 {
1756cb66
VM
2686 aclass = ira_allocno_classes[i];
2687 bitmap_clear (&dead_points[aclass]);
927425df
VM
2688 }
2689}
2690
2691\f
2692
058e97ec
VM
2693/* Set up minimal and maximal live range points for allocnos. */
2694static void
2695setup_min_max_allocno_live_range_point (void)
2696{
2697 int i;
2698 ira_allocno_t a, parent_a, cap;
2699 ira_allocno_iterator ai;
ac0ab4f7
BS
2700#ifdef ENABLE_IRA_CHECKING
2701 ira_object_iterator oi;
2702 ira_object_t obj;
2703#endif
b14151b5 2704 live_range_t r;
058e97ec
VM
2705 ira_loop_tree_node_t parent;
2706
2707 FOR_EACH_ALLOCNO (a, ai)
2708 {
ac0ab4f7 2709 int n = ALLOCNO_NUM_OBJECTS (a);
1756cb66 2710
ac0ab4f7
BS
2711 for (i = 0; i < n; i++)
2712 {
2713 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2714 r = OBJECT_LIVE_RANGES (obj);
2715 if (r == NULL)
2716 continue;
2717 OBJECT_MAX (obj) = r->finish;
2718 for (; r->next != NULL; r = r->next)
2719 ;
2720 OBJECT_MIN (obj) = r->start;
2721 }
058e97ec
VM
2722 }
2723 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2724 for (a = ira_regno_allocno_map[i];
2725 a != NULL;
2726 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2727 {
ac0ab4f7
BS
2728 int j;
2729 int n = ALLOCNO_NUM_OBJECTS (a);
1756cb66 2730
ac0ab4f7 2731 for (j = 0; j < n; j++)
058e97ec 2732 {
ac0ab4f7
BS
2733 ira_object_t obj = ALLOCNO_OBJECT (a, j);
2734 ira_object_t parent_obj;
2735
2736 if (OBJECT_MAX (obj) < 0)
2737 continue;
2738 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2739 /* Accumulation of range info. */
2740 if (ALLOCNO_CAP (a) != NULL)
058e97ec 2741 {
ac0ab4f7
BS
2742 for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
2743 {
2744 ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
2745 if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
2746 OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
2747 if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
2748 OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
2749 }
2750 continue;
058e97ec 2751 }
ac0ab4f7
BS
2752 if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
2753 continue;
2754 parent_a = parent->regno_allocno_map[i];
2755 parent_obj = ALLOCNO_OBJECT (parent_a, j);
2756 if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
2757 OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
2758 if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
2759 OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
058e97ec 2760 }
058e97ec
VM
2761 }
2762#ifdef ENABLE_IRA_CHECKING
ac0ab4f7 2763 FOR_EACH_OBJECT (obj, oi)
058e97ec 2764 {
a49ae217
BS
2765 if ((0 <= OBJECT_MIN (obj) && OBJECT_MIN (obj) <= ira_max_point)
2766 && (0 <= OBJECT_MAX (obj) && OBJECT_MAX (obj) <= ira_max_point))
058e97ec
VM
2767 continue;
2768 gcc_unreachable ();
2769 }
2770#endif
2771}
2772
2773/* Sort allocnos according to their live ranges. Allocnos with
1756cb66
VM
2774 smaller allocno class are put first unless we use priority
2775 coloring. Allocnos with the same class are ordered according
2776 their start (min). Allocnos with the same start are ordered
2777 according their finish (max). */
058e97ec 2778static int
ac0ab4f7 2779object_range_compare_func (const void *v1p, const void *v2p)
058e97ec
VM
2780{
2781 int diff;
a49ae217
BS
2782 ira_object_t obj1 = *(const ira_object_t *) v1p;
2783 ira_object_t obj2 = *(const ira_object_t *) v2p;
2784 ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
2785 ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
058e97ec 2786
a49ae217 2787 if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
058e97ec 2788 return diff;
a49ae217 2789 if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
058e97ec
VM
2790 return diff;
2791 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2792}
2793
a49ae217 2794/* Sort ira_object_id_map and set up conflict id of allocnos. */
058e97ec 2795static void
a49ae217 2796sort_conflict_id_map (void)
058e97ec
VM
2797{
2798 int i, num;
2799 ira_allocno_t a;
2800 ira_allocno_iterator ai;
2801
2802 num = 0;
2803 FOR_EACH_ALLOCNO (a, ai)
ac0ab4f7
BS
2804 {
2805 ira_allocno_object_iterator oi;
2806 ira_object_t obj;
2807
2808 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2809 ira_object_id_map[num++] = obj;
2810 }
85c00e0b
JJ
2811 if (num > 1)
2812 qsort (ira_object_id_map, num, sizeof (ira_object_t),
2813 object_range_compare_func);
058e97ec 2814 for (i = 0; i < num; i++)
a49ae217
BS
2815 {
2816 ira_object_t obj = ira_object_id_map[i];
1756cb66 2817
a49ae217
BS
2818 gcc_assert (obj != NULL);
2819 OBJECT_CONFLICT_ID (obj) = i;
2820 }
2821 for (i = num; i < ira_objects_num; i++)
2822 ira_object_id_map[i] = NULL;
058e97ec
VM
2823}
2824
/* Set up minimal and maximal conflict ids of allocnos with which
   given allocno can conflict.  After this pass, OBJECT_MIN/OBJECT_MAX
   no longer hold live range points: they hold the smallest and
   largest conflict ids of objects this object can possibly conflict
   with, which lets conflict detection skip non-overlapping ids
   quickly.  Relies on ira_object_id_map being sorted by
   object_range_compare_func.  */
static void
setup_min_max_conflict_allocno_ids (void)
{
  int aclass;
  int i, j, min, max, start, finish, first_not_finished, filled_area_start;
  int *live_range_min, *last_lived;
  int word0_min, word0_max;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  /* Forward sweep: compute the minimal conflicting id for each
     object.  live_range_min saves the original OBJECT_MIN values
     (range starts) for the backward sweep below, since OBJECT_MIN is
     overwritten here.  */
  live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  aclass = -1;
  first_not_finished = -1;
  for (i = 0; i < ira_objects_num; i++)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);

      if (aclass < 0)
	{
	  /* First object seen: start the scan window here.  */
	  aclass = ALLOCNO_CLASS (a);
	  min = i;
	  first_not_finished = i;
	}
      else
	{
	  start = OBJECT_MIN (obj);
	  /* If we skip an allocno, the allocno with smaller ids will
	     be also skipped because of the secondary sorting the
	     range finishes (see function
	     object_range_compare_func).  */
	  while (first_not_finished < i
		 && start > OBJECT_MAX (ira_object_id_map
					[first_not_finished]))
	    first_not_finished++;
	  min = first_not_finished;
	}
      if (min == i)
	/* We could increase min further in this case but it is good
	   enough.  */
	min++;
      live_range_min[i] = OBJECT_MIN (obj);
      OBJECT_MIN (obj) = min;
    }

  /* Backward sweep: compute the maximal conflicting id for each
     object, using last_lived[point] = id of the last object (in
     decreasing id order) live at that point.  */
  last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
  aclass = -1;
  filled_area_start = -1;
  for (i = ira_objects_num - 1; i >= 0; i--)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);
      if (aclass < 0)
	{
	  /* First object of the backward sweep: reset the fill map.  */
	  aclass = ALLOCNO_CLASS (a);
	  for (j = 0; j < ira_max_point; j++)
	    last_lived[j] = -1;
	  filled_area_start = ira_max_point;
	}
      min = live_range_min[i];
      finish = OBJECT_MAX (obj);
      max = last_lived[finish];
      if (max < 0)
	/* We could decrease max further in this case but it is good
	   enough.  */
	max = OBJECT_CONFLICT_ID (obj) - 1;
      OBJECT_MAX (obj) = max;
      /* In filling, we can go further A range finish to recognize
	 intersection quickly because if the finish of subsequently
	 processed allocno (it has smaller conflict id) range is
	 further A range finish than they are definitely intersected
	 (the reason for this is the allocnos with bigger conflict id
	 have their range starts not smaller than allocnos with
	 smaller ids).  */
      for (j = min; j < filled_area_start; j++)
	last_lived[j] = i;
      filled_area_start = min;
    }
  ira_free (last_lived);
  ira_free (live_range_min);

  /* For allocnos with more than one object, we may later record extra
     conflicts in subobject 0 that we cannot really know about here.
     For now, simply widen the min/max range of these subobjects.  */

  word0_min = INT_MAX;
  word0_max = INT_MIN;

  /* First pass: find the overall conflict-id range spanned by all
     word-0 subobjects of multi-object allocnos.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_CONFLICT_ID (obj0) < word0_min)
	word0_min = OBJECT_CONFLICT_ID (obj0);
      if (OBJECT_CONFLICT_ID (obj0) > word0_max)
	word0_max = OBJECT_CONFLICT_ID (obj0);
    }
  /* Second pass: widen every such subobject's min/max to cover that
     whole range.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_MIN (obj0) > word0_min)
	OBJECT_MIN (obj0) = word0_min;
      if (OBJECT_MAX (obj0) < word0_max)
	OBJECT_MAX (obj0) = word0_max;
    }
}
2949
2950\f
2951
2952static void
2953create_caps (void)
2954{
2955 ira_allocno_t a;
2956 ira_allocno_iterator ai;
2957 ira_loop_tree_node_t loop_tree_node;
2958
2959 FOR_EACH_ALLOCNO (a, ai)
2960 {
2961 if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
2962 continue;
2963 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2964 create_cap_allocno (a);
2965 else if (ALLOCNO_CAP (a) == NULL)
2966 {
2967 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2968 if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
2969 create_cap_allocno (a);
2970 }
2971 }
2972}
2973
2974\f
2975
2976/* The page contains code transforming more one region internal
2977 representation (IR) to one region IR which is necessary for reload.
2978 This transformation is called IR flattening. We might just rebuild
2979 the IR for one region but we don't do it because it takes a lot of
2980 time. */
2981
82b33628
VM
2982/* Map: regno -> allocnos which will finally represent the regno for
2983 IR with one region. */
2984static ira_allocno_t *regno_top_level_allocno_map;
2985
029da7d4
BS
2986/* Find the allocno that corresponds to A at a level one higher up in the
2987 loop tree. Returns NULL if A is a cap, or if it has no parent. */
2988ira_allocno_t
2989ira_parent_allocno (ira_allocno_t a)
2990{
2991 ira_loop_tree_node_t parent;
2992
2993 if (ALLOCNO_CAP (a) != NULL)
2994 return NULL;
2995
2996 parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
2997 if (parent == NULL)
2998 return NULL;
2999
3000 return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
3001}
3002
3003/* Find the allocno that corresponds to A at a level one higher up in the
3004 loop tree. If ALLOCNO_CAP is set for A, return that. */
3005ira_allocno_t
3006ira_parent_or_cap_allocno (ira_allocno_t a)
3007{
3008 if (ALLOCNO_CAP (a) != NULL)
3009 return ALLOCNO_CAP (a);
3010
3011 return ira_parent_allocno (a);
3012}
3013
/* Process all allocnos originated from pseudo REGNO and copy live
   ranges, hard reg conflicts, and allocno stack reg attributes from
   low level allocnos to final allocnos which are destinations of
   removed stores at a loop exit.  Return true if we copied live
   ranges.  Called from ira_flattening after
   regno_top_level_allocno_map has been filled in.  */
static bool
copy_info_to_removed_store_destinations (int regno)
{
  ira_allocno_t a;
  ira_allocno_t parent_a = NULL;
  ira_loop_tree_node_t parent;
  bool merged_p;

  merged_p = false;
  /* Walk all allocnos created for REGNO across the loop tree.  */
  for (a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
	/* This allocno will be removed.  */
	continue;

      /* Caps will be removed.  */
      ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
      /* Walk up the loop tree looking for the nearest ancestor allocno
	 that survives flattening and was the destination of a removed
	 memory store; the loop also stops if an ancestor has no
	 allocno for REGNO at all (parent_a == NULL).  */
      for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
	   parent != NULL;
	   parent = parent->parent)
	if ((parent_a = parent->regno_allocno_map[regno]) == NULL
	    || (parent_a
		== regno_top_level_allocno_map[REGNO
					       (allocno_emit_reg (parent_a))]
		&& ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
	  break;
      if (parent == NULL || parent_a == NULL)
	continue;

      /* Propagate live ranges and conflicts into the surviving
	 ancestor.  */
      copy_allocno_live_ranges (a, parent_a);
      merge_hard_reg_conflicts (a, parent_a, true);

      /* Accumulate call-related and register-pressure statistics.  */
      ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CALLS_CROSSED_NUM (a);
      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
      IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a),
			ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
      merged_p = true;
    }
  return merged_p;
}
3066
/* Flatten the IR.  In other words, this function transforms IR as if
   it were built with one region (without loops).  We could make it
   much simpler by rebuilding IR with one region, but unfortunately it
   takes a lot of time.  MAX_REGNO_BEFORE_EMIT and
   IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
   IRA_MAX_POINT before emitting insns on the loop borders.

   Phases: (1) pick the surviving allocno for each pseudo and merge or
   subtract child attributes into parents; (2) rebuild conflicts if
   new pseudos appeared; (3) retarget or mark-for-removal all copies;
   (4) delete dead allocnos and copies and rebuild the regno maps.  */
void
ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
{
  int i, j;
  bool keep_p;
  int hard_regs_num;
  bool new_pseudos_p, merged_p, mem_dest_p;
  unsigned int n;
  enum reg_class aclass;
  ira_allocno_t a, parent_a, first, second, node_first, node_second;
  ira_copy_t cp;
  ira_loop_tree_node_t node;
  live_range_t r;
  ira_allocno_iterator ai;
  ira_copy_iterator ci;

  regno_top_level_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
				      * sizeof (ira_allocno_t));
  memset (regno_top_level_allocno_map, 0,
	  max_reg_num () * sizeof (ira_allocno_t));
  new_pseudos_p = merged_p = false;
  /* Seed the "total" conflict sets from the per-region sets before
     merging begins.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_allocno_object_iterator oi;
      ira_object_t obj;

      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Caps are not in the regno allocno maps and they are never
	   will be transformed into allocnos existing after IR
	   flattening.  */
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			   OBJECT_CONFLICT_HARD_REGS (obj));
#ifdef STACK_REGS
      ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
#endif
    }
  /* Fix final allocno attributes.  */
  for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    {
      mem_dest_p = false;
      for (a = ira_regno_allocno_map[i];
	   a != NULL;
	   a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
	{
	  ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);

	  ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	  if (data->somewhere_renamed_p)
	    new_pseudos_p = true;
	  parent_a = ira_parent_allocno (a);
	  if (parent_a == NULL)
	    {
	      /* Root-level allocno: it survives as-is.  */
	      ALLOCNO_COPIES (a) = NULL;
	      regno_top_level_allocno_map[REGNO (data->reg)] = a;
	      continue;
	    }
	  ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);

	  if (data->mem_optimized_dest != NULL)
	    mem_dest_p = true;
	  parent_data = ALLOCNO_EMIT_DATA (parent_a);
	  if (REGNO (data->reg) == REGNO (parent_data->reg))
	    {
	      /* Same pseudo as the parent: merge the child into the
		 parent; the child will be removed later.  */
	      merge_hard_reg_conflicts (a, parent_a, true);
	      move_allocno_live_ranges (a, parent_a);
	      merged_p = true;
	      parent_data->mem_optimized_dest_p
		= (parent_data->mem_optimized_dest_p
		   || data->mem_optimized_dest_p);
	      continue;
	    }
	  /* Renamed pseudo: the child becomes a top-level allocno of
	     its own; subtract its statistics from every ancestor so
	     they are not counted twice.  */
	  new_pseudos_p = true;
	  for (;;)
	    {
	      ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
	      ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
	      ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
	      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
		-= ALLOCNO_CALLS_CROSSED_NUM (a);
	      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
		-= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
	      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
		-= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	      ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
			  && ALLOCNO_NREFS (parent_a) >= 0
			  && ALLOCNO_FREQ (parent_a) >= 0);
	      aclass = ALLOCNO_CLASS (parent_a);
	      hard_regs_num = ira_class_hard_regs_num[aclass];
	      if (ALLOCNO_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_HARD_REG_COSTS (a)[j];
	      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
	      ALLOCNO_CLASS_COST (parent_a)
		-= ALLOCNO_CLASS_COST (a);
	      ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
	      parent_a = ira_parent_allocno (parent_a);
	      if (parent_a == NULL)
		break;
	    }
	  ALLOCNO_COPIES (a) = NULL;
	  regno_top_level_allocno_map[REGNO (data->reg)] = a;
	}
      if (mem_dest_p && copy_info_to_removed_store_destinations (i))
	merged_p = true;
    }
  ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
  if (merged_p || ira_max_point_before_emit != ira_max_point)
    ira_rebuild_start_finish_chains ();
  if (new_pseudos_p)
    {
      sparseset objects_live;

      /* Rebuild conflicts.  */
      FOR_EACH_ALLOCNO (a, ai)
	{
	  ira_allocno_object_iterator oi;
	  ira_object_t obj;

	  if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
	      || ALLOCNO_CAP_MEMBER (a) != NULL)
	    continue;
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    {
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		ira_assert (r->object == obj);
	      clear_conflicts (obj);
	    }
	}
      objects_live = sparseset_alloc (ira_objects_num);
      /* Sweep all program points, maintaining the set of live objects
	 and adding a conflict for every simultaneously-live pair with
	 intersecting register classes.  */
      for (i = 0; i < ira_max_point; i++)
	{
	  for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
	    {
	      ira_object_t obj = r->object;

	      a = OBJECT_ALLOCNO (obj);
	      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
		  || ALLOCNO_CAP_MEMBER (a) != NULL)
		continue;

	      aclass = ALLOCNO_CLASS (a);
	      EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
		{
		  ira_object_t live_obj = ira_object_id_map[n];
		  ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
		  enum reg_class live_aclass = ALLOCNO_CLASS (live_a);

		  if (ira_reg_classes_intersect_p[aclass][live_aclass]
		      /* Don't set up conflict for the allocno with itself.  */
		      && live_a != a)
		    ira_add_conflict (obj, live_obj);
		}
	      sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
	    }

	  for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
	    sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
	}
      sparseset_free (objects_live);
      compress_conflict_vecs ();
    }
  /* Mark some copies for removing and change allocnos in the rest
     copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
	  || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf
	      (ira_dump_file, " Remove cp%d:%c%dr%d-%c%dr%d\n",
	       cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->first),
	       REGNO (allocno_emit_reg (cp->first)),
	       ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->second),
	       REGNO (allocno_emit_reg (cp->second)));
	  cp->loop_tree_node = NULL;
	  continue;
	}
      first
	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
      second
	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
      node = cp->loop_tree_node;
      if (node == NULL)
	keep_p = true; /* It copy generated in ira-emit.c.  */
      else
	{
	  /* Check that the copy was not propagated from level on
	     which we will have different pseudos.  */
	  node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
	  node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
	  keep_p = ((REGNO (allocno_emit_reg (first))
		     == REGNO (allocno_emit_reg (node_first)))
		    && (REGNO (allocno_emit_reg (second))
			== REGNO (allocno_emit_reg (node_second))));
	}
      if (keep_p)
	{
	  /* Retarget the copy to the surviving top-level allocnos.  */
	  cp->loop_tree_node = ira_loop_tree_root;
	  cp->first = first;
	  cp->second = second;
	}
      else
	{
	  cp->loop_tree_node = NULL;
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, " Remove cp%d:a%dr%d-a%dr%d\n",
		     cp->num, ALLOCNO_NUM (cp->first),
		     REGNO (allocno_emit_reg (cp->first)),
		     ALLOCNO_NUM (cp->second),
		     REGNO (allocno_emit_reg (cp->second)));
	}
    }
  /* Remove unnecessary allocnos on lower levels of the loop tree.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
	  || ALLOCNO_CAP_MEMBER (a) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, " Remove a%dr%d\n",
		     ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
	  ira_remove_allocno_prefs (a);
	  finish_allocno (a);
	  continue;
	}
      /* Surviving allocno: move it to the root node under its final
	 pseudo number.  */
      ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
      ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
      ALLOCNO_CAP (a) = NULL;
      /* Restore updated costs for assignments from reload.  */
      ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
      ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
      if (! ALLOCNO_ASSIGNED_P (a))
	ira_free_allocno_updated_costs (a);
      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
    }
  /* Remove unnecessary copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (cp->loop_tree_node == NULL)
	{
	  ira_copies[cp->num] = NULL;
	  finish_copy (cp);
	  continue;
	}
      ira_assert
	(ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
	 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
      add_allocno_copy_to_list (cp);
      swap_allocno_copy_ends_if_necessary (cp);
    }
  rebuild_regno_allocno_maps ();
  if (ira_max_point != ira_max_point_before_emit)
    ira_compress_allocno_live_ranges ();
  ira_free (regno_top_level_allocno_map);
}
3341
3342\f
3343
#ifdef ENABLE_IRA_CHECKING
/* Check creation of all allocnos.  Allocnos on lower levels should
   have allocnos or caps on all upper levels.  */
static void
check_allocno_creation (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_loop_tree_node_t node = ALLOCNO_LOOP_TREE_NODE (a);

      /* Every allocno must be registered in its own node.  */
      ira_assert (bitmap_bit_p (node->all_allocnos, ALLOCNO_NUM (a)));
      if (node == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Cap members must have been capped.  */
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	/* Uncapped non-member: the parent must carry an allocno for
	   this regno and the allocno must be on the node's border.  */
	ira_assert (node->parent->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
		    && bitmap_bit_p (node->border_allocnos,
				     ALLOCNO_NUM (a)));
    }
}
#endif
3371
4ac293e2
VM
3372/* Identify allocnos which prefer a register class with a single hard register.
3373 Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
3374 less likely to use the preferred singleton register. */
3375static void
3376update_conflict_hard_reg_costs (void)
3377{
3378 ira_allocno_t a;
3379 ira_allocno_iterator ai;
3380 int i, index, min;
3381
3382 FOR_EACH_ALLOCNO (a, ai)
3383 {
6f76a878
AS
3384 reg_class_t aclass = ALLOCNO_CLASS (a);
3385 reg_class_t pref = reg_preferred_class (ALLOCNO_REGNO (a));
c9d74da6
RS
3386 int singleton = ira_class_singleton[pref][ALLOCNO_MODE (a)];
3387 if (singleton < 0)
4ac293e2 3388 continue;
c9d74da6 3389 index = ira_class_hard_reg_index[(int) aclass][singleton];
4ac293e2
VM
3390 if (index < 0)
3391 continue;
3392 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
3393 || ALLOCNO_HARD_REG_COSTS (a) == NULL)
3394 continue;
3395 min = INT_MAX;
6f76a878 3396 for (i = ira_class_hard_regs_num[(int) aclass] - 1; i >= 0; i--)
1756cb66 3397 if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
4ac293e2
VM
3398 && min > ALLOCNO_HARD_REG_COSTS (a)[i])
3399 min = ALLOCNO_HARD_REG_COSTS (a)[i];
3400 if (min == INT_MAX)
3401 continue;
3402 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
1756cb66 3403 aclass, 0);
4ac293e2 3404 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
1756cb66 3405 -= min - ALLOCNO_CLASS_COST (a);
4ac293e2
VM
3406 }
3407}
3408
/* Create a internal representation (IR) for IRA (allocnos, copies,
   loop tree nodes).  The function returns TRUE if we generate loop
   structure (besides nodes representing all function and the basic
   blocks) for regional allocation.  A true return means that we
   really need to flatten IR before the reload.

   NOTE: the call order below is the contract -- e.g. conflict-id
   setup must follow live range creation, and ira_build_conflicts
   must follow conflict-id setup.  */
bool
ira_build (void)
{
  bool loops_p;

  df_analyze ();
  /* Allocate the core IRA data structures.  */
  initiate_cost_vectors ();
  initiate_allocnos ();
  initiate_prefs ();
  initiate_copies ();
  /* Build the loop tree and populate it with allocnos and costs.  */
  create_loop_tree_nodes ();
  form_loop_tree ();
  create_allocnos ();
  ira_costs ();
  create_allocno_objects ();
  ira_create_allocno_live_ranges ();
  remove_unnecessary_regions (false);
  ira_compress_allocno_live_ranges ();
  update_bad_spill_attribute ();
  loops_p = more_one_region_p ();
  if (loops_p)
    {
      /* Regional allocation: push info up the loop tree and create
	 cap allocnos.  */
      propagate_allocno_info ();
      create_caps ();
    }
  ira_tune_allocno_costs ();
#ifdef ENABLE_IRA_CHECKING
  check_allocno_creation ();
#endif
  /* Set up conflict ids, then build the conflict graph.  */
  setup_min_max_allocno_live_range_point ();
  sort_conflict_id_map ();
  setup_min_max_conflict_allocno_ids ();
  ira_build_conflicts ();
  update_conflict_hard_reg_costs ();
  if (! ira_conflicts_p)
    {
      ira_allocno_t a;
      ira_allocno_iterator ai;

      /* Remove all regions but root one.  */
      if (loops_p)
	{
	  remove_unnecessary_regions (true);
	  loops_p = false;
	}
      /* We don't save hard registers around calls for fast allocation
	 -- add caller clobbered registers as conflicting ones to
	 allocno crossing calls.  */
      FOR_EACH_ALLOCNO (a, ai)
	if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	  ior_hard_reg_conflicts (a, &call_used_reg_set);
    }
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_copies (ira_dump_file);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_prefs (ira_dump_file);
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    {
      /* Dump summary statistics: conflict edges (n), live ranges
	 (nr), and multi-object allocnos (nr_big).  */
      int n, nr, nr_big;
      ira_allocno_t a;
      live_range_t r;
      ira_allocno_iterator ai;

      n = 0;
      nr = 0;
      nr_big = 0;
      FOR_EACH_ALLOCNO (a, ai)
	{
	  int j, nobj = ALLOCNO_NUM_OBJECTS (a);

	  if (nobj > 1)
	    nr_big++;
	  for (j = 0; j < nobj; j++)
	    {
	      ira_object_t obj = ALLOCNO_OBJECT (a, j);
	      n += OBJECT_NUM_CONFLICTS (obj);
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		nr++;
	    }
	}
      fprintf (ira_dump_file, " regions=%d, blocks=%d, points=%d\n",
	       current_loops == NULL ? 1 : number_of_loops (cfun),
	       n_basic_blocks_for_fn (cfun), ira_max_point);
      fprintf (ira_dump_file,
	       " allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
	       ira_allocnos_num, nr_big, ira_copies_num, n, nr);
    }
  return loops_p;
}
3503
/* Release the data created by function ira_build.  Teardown mirrors
   the initiate_*/create_* calls made there.  */
void
ira_destroy (void)
{
  finish_loop_tree_nodes ();
  finish_prefs ();
  finish_copies ();
  finish_allocnos ();
  finish_cost_vectors ();
  ira_finish_allocno_live_ranges ();
}