/* IRA allocation based on graph coloring.
   Copyright (C) 2006-2019 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "ira-int.h"
#include "reload.h"
#include "cfgloop.h"

typedef struct allocno_hard_regs *allocno_hard_regs_t;

/* The structure contains information about hard registers that can
   be assigned to allocnos.  Usually it is the allocno profitable hard
   registers, but in some cases this set can be a bit different.  The
   major reason for the difference is the requirement to use hard
   register sets that form a tree or a forest (set of trees), i.e. the
   hard register set of a node should contain the hard register sets
   of its subnodes.  */
struct allocno_hard_regs
{
  /* Hard registers that can be assigned to an allocno.  */
  HARD_REG_SET set;
  /* Overall (spilling) cost of all allocnos with the given register
     set.  */
  int64_t cost;
};

typedef struct allocno_hard_regs_node *allocno_hard_regs_node_t;

/* A node representing allocno hard registers.  Such nodes form a
   forest (set of trees).  Each subnode of a given node in the forest
   refers to a hard register set (usually an allocno profitable hard
   register set) which is a subset of the one referred to by the
   given node.  */
struct allocno_hard_regs_node
{
  /* Number of the node in a preorder traversal of the forest.  */
  int preorder_num;
  /* Used for different calculations, like finding the conflict size
     of an allocno.  */
  int check;
  /* Used for calculation of the conflict size of an allocno.  The
     conflict size of the allocno is the maximal number of the given
     allocno hard registers needed for allocation of the conflicting
     allocnos.  The given allocno is trivially colorable if this
     number plus the number of hard registers needed for the given
     allocno is not greater than the size of the given allocno hard
     register set.  */
  int conflict_size;
  /* The number of hard registers given by member hard_regs.  */
  int hard_regs_num;
  /* The following member is used to form the final forest.  */
  bool used_p;
  /* Pointer to the corresponding profitable hard registers.  */
  allocno_hard_regs_t hard_regs;
  /* Parent, first subnode, previous and next node with the same
     parent in the forest.  */
  allocno_hard_regs_node_t parent, first, prev, next;
};
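
/* Illustrative example (the register numbers are invented, not taken
   from any particular target): with eight allocatable hard registers
   r0-r7, a forest obeying the containment rule above could be

     {r0-r7}
       {r0-r3}
         {r0-r1}
       {r4-r7}

   where the set of every node contains the sets of all its subnodes.
   form_allocno_hard_regs_nodes_forest below builds such a forest
   from the profitable hard register sets of the allocnos being
   colored.  */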

/* Info about changing hard reg costs of an allocno.  */
struct update_cost_record
{
  /* Hard regno for which we changed the cost.  */
  int hard_regno;
  /* Divisor used when we changed the cost of HARD_REGNO.  */
  int divisor;
  /* Next record for the given allocno.  */
  struct update_cost_record *next;
};

/* To decrease the footprint of the ira_allocno structure we store all
   data needed only for coloring in the following structure.  */
struct allocno_color_data
{
  /* TRUE value means that the allocno was not removed yet from the
     conflict graph during coloring.  */
  unsigned int in_graph_p : 1;
  /* TRUE if it is put on the stack to make other allocnos
     colorable.  */
  unsigned int may_be_spilled_p : 1;
  /* TRUE if the allocno is trivially colorable.  */
  unsigned int colorable_p : 1;
  /* Number of hard registers of the allocno class really
     available for the allocno allocation.  It is the number of the
     profitable hard regs.  */
  int available_regs_num;
  /* Sum of frequencies of hard register preferences of all
     conflicting allocnos which are not on the coloring stack yet.  */
  int conflict_allocno_hard_prefs;
  /* Allocnos in a bucket (used in coloring) chained by the following
     two members.  */
  ira_allocno_t next_bucket_allocno;
  ira_allocno_t prev_bucket_allocno;
  /* Used for temporary purposes.  */
  int temp;
  /* Used to exclude repeated processing.  */
  int last_process;
  /* Profitable hard regs available for allocation of this pseudo.
     It means that the set excludes unavailable hard regs and hard
     regs conflicting with the given pseudo.  They should be of the
     allocno class.  */
  HARD_REG_SET profitable_hard_regs;
  /* The allocno hard registers node.  */
  allocno_hard_regs_node_t hard_regs_node;
  /* Array of structures allocno_hard_regs_subnode representing the
     given allocno hard registers node (the 1st element in the array)
     and all its subnodes in the tree (forest) of allocno hard
     register nodes (see comments above).  */
  int hard_regs_subnodes_start;
  /* The length of the previous array.  */
  int hard_regs_subnodes_num;
  /* Records about updating allocno hard reg costs from copies.  If
     the allocno did not get the expected hard register, these records
     are used to restore the original hard reg costs of allocnos
     connected to this allocno by copies.  */
  struct update_cost_record *update_cost_records;
  /* Threads.  We collect allocnos connected by copies into threads
     and try to assign hard regs to allocnos by threads.  */
  /* Allocno representing the whole thread.  */
  ira_allocno_t first_thread_allocno;
  /* Allocnos in a thread form a cyclic list through the following
     member.  */
  ira_allocno_t next_thread_allocno;
  /* Total thread frequency.  Defined only for the first thread
     allocno.  */
  int thread_freq;
};

/* See above.  */
typedef struct allocno_color_data *allocno_color_data_t;

/* Container for storing allocno data concerning coloring.  */
static allocno_color_data_t allocno_color_data;

/* Macro to access the data concerning coloring.  */
#define ALLOCNO_COLOR_DATA(a) ((allocno_color_data_t) ALLOCNO_ADD_DATA (a))

/* Used for finding allocno colorability and for updating preferences
   during assignment, to exclude repeated allocno processing.  */
static int curr_allocno_process;

/* This file contains code for regional graph coloring, spill/restore
   code placement optimization, and code helping the reload pass to do
   a better job.  */

/* Bitmap of allocnos which should be colored.  */
static bitmap coloring_allocno_bitmap;

/* Bitmap of allocnos which should be taken into account during
   coloring.  In the general case it contains allocnos from
   coloring_allocno_bitmap plus other already colored conflicting
   allocnos.  */
static bitmap consideration_allocno_bitmap;

/* All allocnos sorted according to their priorities.  */
static ira_allocno_t *sorted_allocnos;

/* Vec representing the stack of allocnos used during coloring.  */
static vec<ira_allocno_t> allocno_stack_vec;

/* Helper for qsort comparison callbacks - return a positive integer if
   X > Y, or a negative value otherwise.  Use a conditional expression
   instead of a difference computation to insulate from possible overflow
   issues, e.g. X - Y < 0 for some X > 0 and Y < 0.  */
#define SORTGT(x,y) (((x) > (y)) ? 1 : -1)

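/* For example, with a 32-bit int, SORTGT (1, INT_MIN) correctly
   yields 1, whereas the difference 1 - INT_MIN would overflow
   (undefined behavior).  Purely an illustration of the comment
   above.  */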


/* Definition of vector of allocno hard registers.  */

/* Vector of unique allocno hard registers.  */
static vec<allocno_hard_regs_t> allocno_hard_regs_vec;

struct allocno_hard_regs_hasher : nofree_ptr_hash <allocno_hard_regs>
{
  static inline hashval_t hash (const allocno_hard_regs *);
  static inline bool equal (const allocno_hard_regs *,
                            const allocno_hard_regs *);
};

/* Returns hash value for allocno hard registers V.  */
inline hashval_t
allocno_hard_regs_hasher::hash (const allocno_hard_regs *hv)
{
  return iterative_hash (&hv->set, sizeof (HARD_REG_SET), 0);
}

/* Compares allocno hard registers V1 and V2.  */
inline bool
allocno_hard_regs_hasher::equal (const allocno_hard_regs *hv1,
                                 const allocno_hard_regs *hv2)
{
  return hard_reg_set_equal_p (hv1->set, hv2->set);
}

/* Hash table of unique allocno hard registers.  */
static hash_table<allocno_hard_regs_hasher> *allocno_hard_regs_htab;

/* Return allocno hard registers in the hash table equal to HV.  */
static allocno_hard_regs_t
find_hard_regs (allocno_hard_regs_t hv)
{
  return allocno_hard_regs_htab->find (hv);
}

/* Insert allocno hard registers HV into the hash table (if it is not
   there yet) and return the value which is in the table.  */
static allocno_hard_regs_t
insert_hard_regs (allocno_hard_regs_t hv)
{
  allocno_hard_regs **slot = allocno_hard_regs_htab->find_slot (hv, INSERT);

  if (*slot == NULL)
    *slot = hv;
  return *slot;
}

/* Initialize data concerning allocno hard registers.  */
static void
init_allocno_hard_regs (void)
{
  allocno_hard_regs_vec.create (200);
  allocno_hard_regs_htab
    = new hash_table<allocno_hard_regs_hasher> (200);
}

/* Add (or update info about) allocno hard registers with SET and
   COST.  */
static allocno_hard_regs_t
add_allocno_hard_regs (HARD_REG_SET set, int64_t cost)
{
  struct allocno_hard_regs temp;
  allocno_hard_regs_t hv;

  gcc_assert (! hard_reg_set_empty_p (set));
  COPY_HARD_REG_SET (temp.set, set);
  if ((hv = find_hard_regs (&temp)) != NULL)
    hv->cost += cost;
  else
    {
      hv = ((struct allocno_hard_regs *)
            ira_allocate (sizeof (struct allocno_hard_regs)));
      COPY_HARD_REG_SET (hv->set, set);
      hv->cost = cost;
      allocno_hard_regs_vec.safe_push (hv);
      insert_hard_regs (hv);
    }
  return hv;
}

/* Finalize data concerning allocno hard registers.  */
static void
finish_allocno_hard_regs (void)
{
  int i;
  allocno_hard_regs_t hv;

  for (i = 0;
       allocno_hard_regs_vec.iterate (i, &hv);
       i++)
    ira_free (hv);
  delete allocno_hard_regs_htab;
  allocno_hard_regs_htab = NULL;
  allocno_hard_regs_vec.release ();
}

/* Sort hard regs according to their frequency of usage.  */
static int
allocno_hard_regs_compare (const void *v1p, const void *v2p)
{
  allocno_hard_regs_t hv1 = *(const allocno_hard_regs_t *) v1p;
  allocno_hard_regs_t hv2 = *(const allocno_hard_regs_t *) v2p;

  if (hv2->cost > hv1->cost)
    return 1;
  else if (hv2->cost < hv1->cost)
    return -1;
  return SORTGT (allocno_hard_regs_hasher::hash (hv2),
                 allocno_hard_regs_hasher::hash (hv1));
}


/* Used for finding a common ancestor of two allocno hard registers
   nodes in the forest.  We use the current value of
   'node_check_tick' to mark all nodes from one node to the top and
   then walk up from another node until we find a marked node.

   It is also used to figure out allocno colorability as a mark that
   we already reset the value of member 'conflict_size' for the forest
   node corresponding to the processed allocno.  */
static int node_check_tick;

/* Roots of the forest containing hard register sets that can be
   assigned to allocnos.  */
static allocno_hard_regs_node_t hard_regs_roots;

/* Definition of vector of allocno hard register nodes.  */

/* Vector used to create the forest.  */
static vec<allocno_hard_regs_node_t> hard_regs_node_vec;

/* Create and return an allocno hard registers node containing allocno
   hard registers HV.  */
static allocno_hard_regs_node_t
create_new_allocno_hard_regs_node (allocno_hard_regs_t hv)
{
  allocno_hard_regs_node_t new_node;

  new_node = ((struct allocno_hard_regs_node *)
              ira_allocate (sizeof (struct allocno_hard_regs_node)));
  new_node->check = 0;
  new_node->hard_regs = hv;
  new_node->hard_regs_num = hard_reg_set_size (hv->set);
  new_node->first = NULL;
  new_node->used_p = false;
  return new_node;
}

/* Add allocno hard registers node NEW_NODE to the forest on its level
   given by ROOTS.  */
static void
add_new_allocno_hard_regs_node_to_forest (allocno_hard_regs_node_t *roots,
                                          allocno_hard_regs_node_t new_node)
{
  new_node->next = *roots;
  if (new_node->next != NULL)
    new_node->next->prev = new_node;
  new_node->prev = NULL;
  *roots = new_node;
}

/* Add allocno hard registers HV (or its best approximation if it is
   not possible) to the forest on its level given by ROOTS.  */
static void
add_allocno_hard_regs_to_forest (allocno_hard_regs_node_t *roots,
                                 allocno_hard_regs_t hv)
{
  unsigned int i, start;
  allocno_hard_regs_node_t node, prev, new_node;
  HARD_REG_SET temp_set;
  allocno_hard_regs_t hv2;

  start = hard_regs_node_vec.length ();
  for (node = *roots; node != NULL; node = node->next)
    {
      if (hard_reg_set_equal_p (hv->set, node->hard_regs->set))
        return;
      if (hard_reg_set_subset_p (hv->set, node->hard_regs->set))
        {
          add_allocno_hard_regs_to_forest (&node->first, hv);
          return;
        }
      if (hard_reg_set_subset_p (node->hard_regs->set, hv->set))
        hard_regs_node_vec.safe_push (node);
      else if (hard_reg_set_intersect_p (hv->set, node->hard_regs->set))
        {
          COPY_HARD_REG_SET (temp_set, hv->set);
          AND_HARD_REG_SET (temp_set, node->hard_regs->set);
          hv2 = add_allocno_hard_regs (temp_set, hv->cost);
          add_allocno_hard_regs_to_forest (&node->first, hv2);
        }
    }
  if (hard_regs_node_vec.length ()
      > start + 1)
    {
      /* Create a new node which contains nodes in hard_regs_node_vec.  */
      CLEAR_HARD_REG_SET (temp_set);
      for (i = start;
           i < hard_regs_node_vec.length ();
           i++)
        {
          node = hard_regs_node_vec[i];
          IOR_HARD_REG_SET (temp_set, node->hard_regs->set);
        }
      hv = add_allocno_hard_regs (temp_set, hv->cost);
      new_node = create_new_allocno_hard_regs_node (hv);
      prev = NULL;
      for (i = start;
           i < hard_regs_node_vec.length ();
           i++)
        {
          node = hard_regs_node_vec[i];
          if (node->prev == NULL)
            *roots = node->next;
          else
            node->prev->next = node->next;
          if (node->next != NULL)
            node->next->prev = node->prev;
          if (prev == NULL)
            new_node->first = node;
          else
            prev->next = node;
          node->prev = prev;
          node->next = NULL;
          prev = node;
        }
      add_new_allocno_hard_regs_node_to_forest (roots, new_node);
    }
  hard_regs_node_vec.truncate (start);
}

/* Add allocno hard registers nodes starting with the forest level
   given by FIRST which contains the biggest set inside SET.  */
static void
collect_allocno_hard_regs_cover (allocno_hard_regs_node_t first,
                                 HARD_REG_SET set)
{
  allocno_hard_regs_node_t node;

  ira_assert (first != NULL);
  for (node = first; node != NULL; node = node->next)
    if (hard_reg_set_subset_p (node->hard_regs->set, set))
      hard_regs_node_vec.safe_push (node);
    else if (hard_reg_set_intersect_p (set, node->hard_regs->set))
      collect_allocno_hard_regs_cover (node->first, set);
}

/* Set up field parent as PARENT in all allocno hard registers nodes
   in the forest given by FIRST.  */
static void
setup_allocno_hard_regs_nodes_parent (allocno_hard_regs_node_t first,
                                      allocno_hard_regs_node_t parent)
{
  allocno_hard_regs_node_t node;

  for (node = first; node != NULL; node = node->next)
    {
      node->parent = parent;
      setup_allocno_hard_regs_nodes_parent (node->first, node);
    }
}

/* Return the allocno hard registers node which is the first common
   ancestor node of FIRST and SECOND in the forest.  */
static allocno_hard_regs_node_t
first_common_ancestor_node (allocno_hard_regs_node_t first,
                            allocno_hard_regs_node_t second)
{
  allocno_hard_regs_node_t node;

  node_check_tick++;
  for (node = first; node != NULL; node = node->parent)
    node->check = node_check_tick;
  for (node = second; node != NULL; node = node->parent)
    if (node->check == node_check_tick)
      return node;
  return first_common_ancestor_node (second, first);
}

/* Print hard reg set SET to F.  */
static void
print_hard_reg_set (FILE *f, HARD_REG_SET set, bool new_line_p)
{
  int i, start;

  for (start = -1, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      if (TEST_HARD_REG_BIT (set, i))
        {
          if (i == 0 || ! TEST_HARD_REG_BIT (set, i - 1))
            start = i;
        }
      if (start >= 0
          && (i == FIRST_PSEUDO_REGISTER - 1 || ! TEST_HARD_REG_BIT (set, i)))
        {
          if (start == i - 1)
            fprintf (f, " %d", start);
          else if (start == i - 2)
            fprintf (f, " %d %d", start, start + 1);
          else
            fprintf (f, " %d-%d", start, i - 1);
          start = -1;
        }
    }
  if (new_line_p)
    fprintf (f, "\n");
}

/* Print the allocno hard register subforest given by ROOTS and its
   LEVEL to F.  */
static void
print_hard_regs_subforest (FILE *f, allocno_hard_regs_node_t roots,
                           int level)
{
  int i;
  allocno_hard_regs_node_t node;

  for (node = roots; node != NULL; node = node->next)
    {
      fprintf (f, " ");
      for (i = 0; i < level * 2; i++)
        fprintf (f, " ");
      fprintf (f, "%d:(", node->preorder_num);
      print_hard_reg_set (f, node->hard_regs->set, false);
      fprintf (f, ")@%" PRId64"\n", node->hard_regs->cost);
      print_hard_regs_subforest (f, node->first, level + 1);
    }
}

/* Print the allocno hard register forest to F.  */
static void
print_hard_regs_forest (FILE *f)
{
  fprintf (f, " Hard reg set forest:\n");
  print_hard_regs_subforest (f, hard_regs_roots, 1);
}

/* Print the allocno hard register forest to stderr.  */
void
ira_debug_hard_regs_forest (void)
{
  print_hard_regs_forest (stderr);
}

/* Remove unused allocno hard registers nodes from the forest given by
   its *ROOTS.  */
static void
remove_unused_allocno_hard_regs_nodes (allocno_hard_regs_node_t *roots)
{
  allocno_hard_regs_node_t node, prev, next, last;

  for (prev = NULL, node = *roots; node != NULL; node = next)
    {
      next = node->next;
      if (node->used_p)
        {
          remove_unused_allocno_hard_regs_nodes (&node->first);
          prev = node;
        }
      else
        {
          for (last = node->first;
               last != NULL && last->next != NULL;
               last = last->next)
            ;
          if (last != NULL)
            {
              if (prev == NULL)
                *roots = node->first;
              else
                prev->next = node->first;
              if (next != NULL)
                next->prev = last;
              last->next = next;
              next = node->first;
            }
          else
            {
              if (prev == NULL)
                *roots = next;
              else
                prev->next = next;
              if (next != NULL)
                next->prev = prev;
            }
          ira_free (node);
        }
    }
}

/* Set up field preorder_num starting with START_NUM in all allocno
   hard registers nodes in the forest given by FIRST.  Return the
   biggest assigned PREORDER_NUM increased by 1.  */
static int
enumerate_allocno_hard_regs_nodes (allocno_hard_regs_node_t first,
                                   allocno_hard_regs_node_t parent,
                                   int start_num)
{
  allocno_hard_regs_node_t node;

  for (node = first; node != NULL; node = node->next)
    {
      node->preorder_num = start_num++;
      node->parent = parent;
      start_num = enumerate_allocno_hard_regs_nodes (node->first, node,
                                                     start_num);
    }
  return start_num;
}

/* Number of allocno hard registers nodes in the forest.  */
static int allocno_hard_regs_nodes_num;

/* Table: preorder number of an allocno hard registers node in the
   forest -> the allocno hard registers node.  */
static allocno_hard_regs_node_t *allocno_hard_regs_nodes;

/* See below.  */
typedef struct allocno_hard_regs_subnode *allocno_hard_regs_subnode_t;

/* The structure is used to describe all subnodes (not only immediate
   ones) of the given allocno hard register node in the tree (forest)
   mentioned above.  The usage of such data accelerates calculation of
   colorability of the given allocno.  */
struct allocno_hard_regs_subnode
{
  /* The conflict size of conflicting allocnos whose hard register
     sets are equal to (plus supersets of, if the given node is the
     given allocno hard registers node) the one in the given node.  */
  int left_conflict_size;
  /* The summary conflict size of conflicting allocnos whose hard
     register sets are strict subsets of the one in the given node.
     The overall conflict size is
       left_conflict_subnodes_size
       + MIN (max_node_impact - left_conflict_subnodes_size,
              left_conflict_size)
  */
  short left_conflict_subnodes_size;
  short max_node_impact;
};
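
/* A small invented example of the formula above: if for some subnode
   max_node_impact is 8, left_conflict_subnodes_size is 3 and
   left_conflict_size is 7, the overall conflict size is
   3 + MIN (8 - 3, 7) = 8.  Conflicts living in strictly smaller
   register sets always count in full, while the remaining pressure
   on the node is bounded both by the node's own capacity and by the
   conflicts recorded directly on it.  */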

/* Container for hard regs subnodes of all allocnos.  */
static allocno_hard_regs_subnode_t allocno_hard_regs_subnodes;

/* Table: (preorder number of an allocno hard registers node in the
   forest, preorder number of an allocno hard registers subnode) ->
   index of the subnode relative to the node.  -1 if it is not a
   subnode.  */
static int *allocno_hard_regs_subnode_index;

/* Set up arrays ALLOCNO_HARD_REGS_NODES and
   ALLOCNO_HARD_REGS_SUBNODE_INDEX.  */
static void
setup_allocno_hard_regs_subnode_index (allocno_hard_regs_node_t first)
{
  allocno_hard_regs_node_t node, parent;
  int index;

  for (node = first; node != NULL; node = node->next)
    {
      allocno_hard_regs_nodes[node->preorder_num] = node;
      for (parent = node; parent != NULL; parent = parent->parent)
        {
          index = parent->preorder_num * allocno_hard_regs_nodes_num;
          allocno_hard_regs_subnode_index[index + node->preorder_num]
            = node->preorder_num - parent->preorder_num;
        }
      setup_allocno_hard_regs_subnode_index (node->first);
    }
}

/* Count all allocno hard registers nodes in tree ROOT.  */
static int
get_allocno_hard_regs_subnodes_num (allocno_hard_regs_node_t root)
{
  int len = 1;

  for (root = root->first; root != NULL; root = root->next)
    len += get_allocno_hard_regs_subnodes_num (root);
  return len;
}

/* Build the forest of allocno hard registers nodes and assign each
   allocno a node from the forest.  */
static void
form_allocno_hard_regs_nodes_forest (void)
{
  unsigned int i, j, size, len;
  int start;
  ira_allocno_t a;
  allocno_hard_regs_t hv;
  bitmap_iterator bi;
  HARD_REG_SET temp;
  allocno_hard_regs_node_t node, allocno_hard_regs_node;
  allocno_color_data_t allocno_data;

  node_check_tick = 0;
  init_allocno_hard_regs ();
  hard_regs_roots = NULL;
  hard_regs_node_vec.create (100);
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
      {
        CLEAR_HARD_REG_SET (temp);
        SET_HARD_REG_BIT (temp, i);
        hv = add_allocno_hard_regs (temp, 0);
        node = create_new_allocno_hard_regs_node (hv);
        add_new_allocno_hard_regs_node_to_forest (&hard_regs_roots, node);
      }
  start = allocno_hard_regs_vec.length ();
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      allocno_data = ALLOCNO_COLOR_DATA (a);

      if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
        continue;
      hv = (add_allocno_hard_regs
            (allocno_data->profitable_hard_regs,
             ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a)));
    }
  SET_HARD_REG_SET (temp);
  AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
  add_allocno_hard_regs (temp, 0);
  qsort (allocno_hard_regs_vec.address () + start,
         allocno_hard_regs_vec.length () - start,
         sizeof (allocno_hard_regs_t), allocno_hard_regs_compare);
  for (i = start;
       allocno_hard_regs_vec.iterate (i, &hv);
       i++)
    {
      add_allocno_hard_regs_to_forest (&hard_regs_roots, hv);
      ira_assert (hard_regs_node_vec.length () == 0);
    }
  /* We need to set up the parent fields for first_common_ancestor_node
     to work correctly.  */
  setup_allocno_hard_regs_nodes_parent (hard_regs_roots, NULL);
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      allocno_data = ALLOCNO_COLOR_DATA (a);
      if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
        continue;
      hard_regs_node_vec.truncate (0);
      collect_allocno_hard_regs_cover (hard_regs_roots,
                                       allocno_data->profitable_hard_regs);
      allocno_hard_regs_node = NULL;
      for (j = 0; hard_regs_node_vec.iterate (j, &node); j++)
        allocno_hard_regs_node
          = (j == 0
             ? node
             : first_common_ancestor_node (node, allocno_hard_regs_node));
      /* That is temporary storage.  */
      allocno_hard_regs_node->used_p = true;
      allocno_data->hard_regs_node = allocno_hard_regs_node;
    }
  ira_assert (hard_regs_roots->next == NULL);
  hard_regs_roots->used_p = true;
  remove_unused_allocno_hard_regs_nodes (&hard_regs_roots);
  allocno_hard_regs_nodes_num
    = enumerate_allocno_hard_regs_nodes (hard_regs_roots, NULL, 0);
  allocno_hard_regs_nodes
    = ((allocno_hard_regs_node_t *)
       ira_allocate (allocno_hard_regs_nodes_num
                     * sizeof (allocno_hard_regs_node_t)));
  size = allocno_hard_regs_nodes_num * allocno_hard_regs_nodes_num;
  allocno_hard_regs_subnode_index
    = (int *) ira_allocate (size * sizeof (int));
  for (i = 0; i < size; i++)
    allocno_hard_regs_subnode_index[i] = -1;
  setup_allocno_hard_regs_subnode_index (hard_regs_roots);
  start = 0;
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      allocno_data = ALLOCNO_COLOR_DATA (a);
      if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
        continue;
      len = get_allocno_hard_regs_subnodes_num (allocno_data->hard_regs_node);
      allocno_data->hard_regs_subnodes_start = start;
      allocno_data->hard_regs_subnodes_num = len;
      start += len;
    }
  allocno_hard_regs_subnodes
    = ((allocno_hard_regs_subnode_t)
       ira_allocate (sizeof (struct allocno_hard_regs_subnode) * start));
  hard_regs_node_vec.release ();
}

/* Free the tree of allocno hard registers nodes given by its ROOT.  */
static void
finish_allocno_hard_regs_nodes_tree (allocno_hard_regs_node_t root)
{
  allocno_hard_regs_node_t child, next;

  for (child = root->first; child != NULL; child = next)
    {
      next = child->next;
      finish_allocno_hard_regs_nodes_tree (child);
    }
  ira_free (root);
}

/* Finish work with the forest of allocno hard registers nodes.  */
static void
finish_allocno_hard_regs_nodes_forest (void)
{
  allocno_hard_regs_node_t node, next;

  ira_free (allocno_hard_regs_subnodes);
  for (node = hard_regs_roots; node != NULL; node = next)
    {
      next = node->next;
      finish_allocno_hard_regs_nodes_tree (node);
    }
  ira_free (allocno_hard_regs_nodes);
  ira_free (allocno_hard_regs_subnode_index);
  finish_allocno_hard_regs ();
}

/* Set up the left conflict sizes and left conflict subnodes sizes of
   hard registers subnodes of allocno A.  Return TRUE if allocno A is
   trivially colorable.  */
static bool
setup_left_conflict_sizes_p (ira_allocno_t a)
{
  int i, k, nobj, start;
  int conflict_size, left_conflict_subnodes_size, node_preorder_num;
  allocno_color_data_t data;
  HARD_REG_SET profitable_hard_regs;
  allocno_hard_regs_subnode_t subnodes;
  allocno_hard_regs_node_t node;
  HARD_REG_SET node_set;

  nobj = ALLOCNO_NUM_OBJECTS (a);
  data = ALLOCNO_COLOR_DATA (a);
  subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
  COPY_HARD_REG_SET (profitable_hard_regs, data->profitable_hard_regs);
  node = data->hard_regs_node;
  node_preorder_num = node->preorder_num;
  COPY_HARD_REG_SET (node_set, node->hard_regs->set);
  node_check_tick++;
  for (k = 0; k < nobj; k++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, k);
      ira_object_t conflict_obj;
      ira_object_conflict_iterator oci;

      FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
        {
          int size;
          ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
          allocno_hard_regs_node_t conflict_node, temp_node;
          HARD_REG_SET conflict_node_set;
          allocno_color_data_t conflict_data;

          conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
          if (! ALLOCNO_COLOR_DATA (conflict_a)->in_graph_p
              || ! hard_reg_set_intersect_p (profitable_hard_regs,
                                             conflict_data
                                             ->profitable_hard_regs))
            continue;
          conflict_node = conflict_data->hard_regs_node;
          COPY_HARD_REG_SET (conflict_node_set, conflict_node->hard_regs->set);
          if (hard_reg_set_subset_p (node_set, conflict_node_set))
            temp_node = node;
          else
            {
              ira_assert (hard_reg_set_subset_p (conflict_node_set, node_set));
              temp_node = conflict_node;
            }
          if (temp_node->check != node_check_tick)
            {
              temp_node->check = node_check_tick;
              temp_node->conflict_size = 0;
            }
          size = (ira_reg_class_max_nregs
                  [ALLOCNO_CLASS (conflict_a)][ALLOCNO_MODE (conflict_a)]);
          if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1)
            /* We will deal with the subwords individually.  */
            size = 1;
          temp_node->conflict_size += size;
        }
    }
  for (i = 0; i < data->hard_regs_subnodes_num; i++)
    {
      allocno_hard_regs_node_t temp_node;

      temp_node = allocno_hard_regs_nodes[i + node_preorder_num];
      ira_assert (temp_node->preorder_num == i + node_preorder_num);
      subnodes[i].left_conflict_size = (temp_node->check != node_check_tick
                                        ? 0 : temp_node->conflict_size);
      if (hard_reg_set_subset_p (temp_node->hard_regs->set,
                                 profitable_hard_regs))
        subnodes[i].max_node_impact = temp_node->hard_regs_num;
      else
        {
          HARD_REG_SET temp_set;
          int j, n, hard_regno;
          enum reg_class aclass;

          COPY_HARD_REG_SET (temp_set, temp_node->hard_regs->set);
          AND_HARD_REG_SET (temp_set, profitable_hard_regs);
          aclass = ALLOCNO_CLASS (a);
          for (n = 0, j = ira_class_hard_regs_num[aclass] - 1; j >= 0; j--)
            {
              hard_regno = ira_class_hard_regs[aclass][j];
              if (TEST_HARD_REG_BIT (temp_set, hard_regno))
                n++;
            }
          subnodes[i].max_node_impact = n;
        }
      subnodes[i].left_conflict_subnodes_size = 0;
    }
  start = node_preorder_num * allocno_hard_regs_nodes_num;
  for (i = data->hard_regs_subnodes_num - 1; i > 0; i--)
    {
      int size, parent_i;
      allocno_hard_regs_node_t parent;

      size = (subnodes[i].left_conflict_subnodes_size
              + MIN (subnodes[i].max_node_impact
                     - subnodes[i].left_conflict_subnodes_size,
                     subnodes[i].left_conflict_size));
      parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
      gcc_checking_assert (parent);
      parent_i
        = allocno_hard_regs_subnode_index[start + parent->preorder_num];
      gcc_checking_assert (parent_i >= 0);
      subnodes[parent_i].left_conflict_subnodes_size += size;
    }
  left_conflict_subnodes_size = subnodes[0].left_conflict_subnodes_size;
  conflict_size
    = (left_conflict_subnodes_size
       + MIN (subnodes[0].max_node_impact - left_conflict_subnodes_size,
              subnodes[0].left_conflict_size));
  conflict_size += ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
  data->colorable_p = conflict_size <= data->available_regs_num;
  return data->colorable_p;
}
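
/* Continuing the invented numbers from the example above: if the
   computed conflict size is 8, the allocno itself needs 2 hard regs
   (ira_reg_class_max_nregs) and available_regs_num is 11, then
   8 + 2 <= 11 and the allocno is trivially colorable; with
   available_regs_num equal to 9 it would not be.  */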

/* Update the left conflict sizes of hard registers subnodes of
   allocno A after removing allocno REMOVED_A with SIZE from the
   conflict graph.  Return TRUE if A is trivially colorable.  */
static bool
update_left_conflict_sizes_p (ira_allocno_t a,
                              ira_allocno_t removed_a, int size)
{
  int i, conflict_size, before_conflict_size, diff, start;
  int node_preorder_num, parent_i;
  allocno_hard_regs_node_t node, removed_node, parent;
  allocno_hard_regs_subnode_t subnodes;
  allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);

  ira_assert (! data->colorable_p);
  node = data->hard_regs_node;
  node_preorder_num = node->preorder_num;
  removed_node = ALLOCNO_COLOR_DATA (removed_a)->hard_regs_node;
  ira_assert (hard_reg_set_subset_p (removed_node->hard_regs->set,
                                     node->hard_regs->set)
              || hard_reg_set_subset_p (node->hard_regs->set,
                                        removed_node->hard_regs->set));
  start = node_preorder_num * allocno_hard_regs_nodes_num;
  i = allocno_hard_regs_subnode_index[start + removed_node->preorder_num];
  if (i < 0)
    i = 0;
  subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
  before_conflict_size
    = (subnodes[i].left_conflict_subnodes_size
       + MIN (subnodes[i].max_node_impact
              - subnodes[i].left_conflict_subnodes_size,
              subnodes[i].left_conflict_size));
  subnodes[i].left_conflict_size -= size;
  for (;;)
    {
      conflict_size
        = (subnodes[i].left_conflict_subnodes_size
           + MIN (subnodes[i].max_node_impact
                  - subnodes[i].left_conflict_subnodes_size,
                  subnodes[i].left_conflict_size));
      if ((diff = before_conflict_size - conflict_size) == 0)
        break;
      ira_assert (conflict_size < before_conflict_size);
      parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
      if (parent == NULL)
        break;
      parent_i
        = allocno_hard_regs_subnode_index[start + parent->preorder_num];
      if (parent_i < 0)
        break;
      i = parent_i;
      before_conflict_size
        = (subnodes[i].left_conflict_subnodes_size
           + MIN (subnodes[i].max_node_impact
                  - subnodes[i].left_conflict_subnodes_size,
                  subnodes[i].left_conflict_size));
      subnodes[i].left_conflict_subnodes_size -= diff;
    }
  if (i != 0
      || (conflict_size
          + ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
          > data->available_regs_num))
    return false;
  data->colorable_p = true;
  return true;
}

/* Return true if allocno A has empty profitable hard regs.  */
static bool
empty_profitable_hard_regs (ira_allocno_t a)
{
  allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);

  return hard_reg_set_empty_p (data->profitable_hard_regs);
}

/* Set up the profitable hard registers for each allocno being
   colored.  */
static void
setup_profitable_hard_regs (void)
{
  unsigned int i;
  int j, k, nobj, hard_regno, nregs, class_size;
  ira_allocno_t a;
  bitmap_iterator bi;
  enum reg_class aclass;
  machine_mode mode;
  allocno_color_data_t data;

  /* Initial set up from allocno classes and explicitly conflicting
     hard regs.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS)
        continue;
      data = ALLOCNO_COLOR_DATA (a);
      if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL
          && ALLOCNO_CLASS_COST (a) > ALLOCNO_MEMORY_COST (a)
          /* Do not empty the profitable regs for the static chain
             pointer pseudo when a non-local goto is used.  */
          && ! non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a)))
        CLEAR_HARD_REG_SET (data->profitable_hard_regs);
      else
        {
          mode = ALLOCNO_MODE (a);
          COPY_HARD_REG_SET (data->profitable_hard_regs,
                             ira_useful_class_mode_regs[aclass][mode]);
          nobj = ALLOCNO_NUM_OBJECTS (a);
          for (k = 0; k < nobj; k++)
            {
              ira_object_t obj = ALLOCNO_OBJECT (a, k);

              AND_COMPL_HARD_REG_SET (data->profitable_hard_regs,
                                      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
            }
        }
    }
  /* Exclude hard regs already assigned for conflicting objects.  */
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
          || ! ALLOCNO_ASSIGNED_P (a)
          || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
        continue;
      mode = ALLOCNO_MODE (a);
      nregs = hard_regno_nregs (hard_regno, mode);
      nobj = ALLOCNO_NUM_OBJECTS (a);
      for (k = 0; k < nobj; k++)
        {
          ira_object_t obj = ALLOCNO_OBJECT (a, k);
          ira_object_t conflict_obj;
          ira_object_conflict_iterator oci;

          FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
            {
              ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

              /* We can process the conflict allocno repeatedly with
                 the same result.  */
              if (nregs == nobj && nregs > 1)
                {
                  int num = OBJECT_SUBWORD (conflict_obj);

                  if (REG_WORDS_BIG_ENDIAN)
                    CLEAR_HARD_REG_BIT
                      (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
                       hard_regno + nobj - num - 1);
                  else
                    CLEAR_HARD_REG_BIT
                      (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
                       hard_regno + num);
                }
              else
                AND_COMPL_HARD_REG_SET
                  (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
                   ira_reg_mode_hard_regset[hard_regno][mode]);
            }
        }
    }
  /* Exclude too costly hard regs.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      int min_cost = INT_MAX;
      int *costs;

      a = ira_allocnos[i];
      if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
          || empty_profitable_hard_regs (a))
        continue;
      data = ALLOCNO_COLOR_DATA (a);
      if ((costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a)) != NULL
          || (costs = ALLOCNO_HARD_REG_COSTS (a)) != NULL)
        {
          class_size = ira_class_hard_regs_num[aclass];
          for (j = 0; j < class_size; j++)
            {
              hard_regno = ira_class_hard_regs[aclass][j];
              if (! TEST_HARD_REG_BIT (data->profitable_hard_regs,
                                       hard_regno))
                continue;
              if (ALLOCNO_UPDATED_MEMORY_COST (a) < costs[j]
                  /* Do not remove HARD_REGNO for the static chain
                     pointer pseudo when a non-local goto is used.  */
                  && ! non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a)))
                CLEAR_HARD_REG_BIT (data->profitable_hard_regs,
                                    hard_regno);
              else if (min_cost > costs[j])
                min_cost = costs[j];
            }
        }
      else if (ALLOCNO_UPDATED_MEMORY_COST (a)
               < ALLOCNO_UPDATED_CLASS_COST (a)
               /* Do not empty the profitable regs for the static
                  chain pointer pseudo when a non-local goto is
                  used.  */
               && ! non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a)))
        CLEAR_HARD_REG_SET (data->profitable_hard_regs);
      if (ALLOCNO_UPDATED_CLASS_COST (a) > min_cost)
        ALLOCNO_UPDATED_CLASS_COST (a) = min_cost;
    }
}


/* This page contains functions used to choose hard registers for
   allocnos.  */

/* Pool for update cost records.  */
static object_allocator<update_cost_record> update_cost_record_pool
  ("update cost records");

/* Return a new update cost record with the given params.  */
static struct update_cost_record *
get_update_cost_record (int hard_regno, int divisor,
                        struct update_cost_record *next)
{
  struct update_cost_record *record;

  record = update_cost_record_pool.allocate ();
  record->hard_regno = hard_regno;
  record->divisor = divisor;
  record->next = next;
  return record;
}

/* Free memory for all records in LIST.  */
static void
free_update_cost_record_list (struct update_cost_record *list)
{
  struct update_cost_record *next;

  while (list != NULL)
    {
      next = list->next;
      update_cost_record_pool.remove (list);
      list = next;
    }
}

/* Free memory allocated for all update cost records.  */
static void
finish_update_cost_records (void)
{
  update_cost_record_pool.release ();
}

/* Array whose element value is TRUE if the corresponding hard
   register was already allocated for an allocno.  */
static bool allocated_hardreg_p[FIRST_PSEUDO_REGISTER];

/* Describes one element in a queue of allocnos whose costs need to be
   updated.  Each allocno in the queue is known to have an allocno
   class.  */
struct update_cost_queue_elem
{
  /* This element is in the queue iff CHECK == update_cost_check.  */
  int check;

  /* COST_HOP_DIVISOR**N, where N is the length of the shortest path
     connecting this allocno to the one being allocated.  */
  int divisor;

  /* Allocno from which we are chaining costs of connected allocnos.
     It is used to avoid going back in the graph of allocnos connected
     by copies.  */
  ira_allocno_t from;

  /* The next allocno in the queue, or null if this is the last element.  */
  ira_allocno_t next;
};

/* The first element in a queue of allocnos whose copy costs need to be
   updated.  Null if the queue is empty.  */
static ira_allocno_t update_cost_queue;

/* The last element in the queue described by update_cost_queue.
   Not valid if update_cost_queue is null.  */
static struct update_cost_queue_elem *update_cost_queue_tail;

/* A pool of elements in the queue described by update_cost_queue.
   Elements are indexed by ALLOCNO_NUM.  */
static struct update_cost_queue_elem *update_cost_queue_elems;

/* The current value of the update_costs_from_copies call count.  */
static int update_cost_check;

/* Allocate and initialize data necessary for function
   update_costs_from_copies.  */
static void
initiate_cost_update (void)
{
  size_t size;

  size = ira_allocnos_num * sizeof (struct update_cost_queue_elem);
  update_cost_queue_elems
    = (struct update_cost_queue_elem *) ira_allocate (size);
  memset (update_cost_queue_elems, 0, size);
  update_cost_check = 0;
}

/* Deallocate data used by function update_costs_from_copies.  */
static void
finish_cost_update (void)
{
  ira_free (update_cost_queue_elems);
  finish_update_cost_records ();
}

/* When we traverse allocnos to update hard register costs, the cost
   divisor will be multiplied by the following macro value for each
   hop from the given allocno to directly connected allocnos.  */
#define COST_HOP_DIVISOR 4

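/* For example, when an allocno is assigned hard register R, an
   allocno directly connected to it by a copy has its cost for R
   adjusted by freq * move_cost / 1, an allocno two copies away by
   freq * move_cost / 4, three copies away by freq * move_cost / 16,
   and so on; update_conflict_hard_regno_costs below stops queueing
   further allocnos once the divisor exceeds COST_HOP_DIVISOR**4
   (i.e. 256), on the assumption that five hops are enough.  */
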
f754734f 1253/* Start a new cost-updating pass. */
058e97ec 1254static void
f754734f 1255start_update_cost (void)
058e97ec 1256{
f754734f
RS
1257 update_cost_check++;
1258 update_cost_queue = NULL;
1259}
058e97ec 1260
3b6d1699 1261/* Add (ALLOCNO, FROM, DIVISOR) to the end of update_cost_queue, unless
1756cb66 1262 ALLOCNO is already in the queue, or has NO_REGS class. */
f754734f 1263static inline void
3b6d1699 1264queue_update_cost (ira_allocno_t allocno, ira_allocno_t from, int divisor)
f754734f
RS
1265{
1266 struct update_cost_queue_elem *elem;
1267
1268 elem = &update_cost_queue_elems[ALLOCNO_NUM (allocno)];
1269 if (elem->check != update_cost_check
1756cb66 1270 && ALLOCNO_CLASS (allocno) != NO_REGS)
058e97ec 1271 {
f754734f 1272 elem->check = update_cost_check;
3b6d1699 1273 elem->from = from;
f754734f
RS
1274 elem->divisor = divisor;
1275 elem->next = NULL;
1276 if (update_cost_queue == NULL)
1277 update_cost_queue = allocno;
058e97ec 1278 else
f754734f
RS
1279 update_cost_queue_tail->next = allocno;
1280 update_cost_queue_tail = elem;
058e97ec
VM
1281 }
1282}
1283
3b6d1699
VM
1284/* Try to remove the first element from update_cost_queue. Return
1285 false if the queue was empty, otherwise make (*ALLOCNO, *FROM,
1286 *DIVISOR) describe the removed element. */
f754734f 1287static inline bool
3b6d1699 1288get_next_update_cost (ira_allocno_t *allocno, ira_allocno_t *from, int *divisor)
058e97ec 1289{
f754734f
RS
1290 struct update_cost_queue_elem *elem;
1291
1292 if (update_cost_queue == NULL)
1293 return false;
1294
1295 *allocno = update_cost_queue;
1296 elem = &update_cost_queue_elems[ALLOCNO_NUM (*allocno)];
3b6d1699 1297 *from = elem->from;
f754734f
RS
1298 *divisor = elem->divisor;
1299 update_cost_queue = elem->next;
1300 return true;
058e97ec
VM
1301}
1302
86f0bef3
VM
1303/* Increase costs of HARD_REGNO by UPDATE_COST and conflict cost by
1304 UPDATE_CONFLICT_COST for ALLOCNO. Return true if we really
1305 modified the cost. */
3b6d1699 1306static bool
86f0bef3
VM
1307update_allocno_cost (ira_allocno_t allocno, int hard_regno,
1308 int update_cost, int update_conflict_cost)
3b6d1699
VM
1309{
1310 int i;
1311 enum reg_class aclass = ALLOCNO_CLASS (allocno);
1312
1313 i = ira_class_hard_reg_index[aclass][hard_regno];
1314 if (i < 0)
1315 return false;
1316 ira_allocate_and_set_or_copy_costs
1317 (&ALLOCNO_UPDATED_HARD_REG_COSTS (allocno), aclass,
1318 ALLOCNO_UPDATED_CLASS_COST (allocno),
1319 ALLOCNO_HARD_REG_COSTS (allocno));
1320 ira_allocate_and_set_or_copy_costs
1321 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno),
1322 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (allocno));
1323 ALLOCNO_UPDATED_HARD_REG_COSTS (allocno)[i] += update_cost;
86f0bef3 1324 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno)[i] += update_conflict_cost;
3b6d1699
VM
1325 return true;
1326}
1327
1328/* Update (decrease if DECR_P) HARD_REGNO cost of allocnos connected
1329 by copies to ALLOCNO to increase chances to remove some copies as
1330 the result of subsequent assignment. Record cost updates if
1331 RECORD_P is true. */
a7f32992 1332static void
3b6d1699
VM
1333update_costs_from_allocno (ira_allocno_t allocno, int hard_regno,
1334 int divisor, bool decr_p, bool record_p)
a7f32992 1335{
86f0bef3 1336 int cost, update_cost, update_conflict_cost;
ef4bddc2 1337 machine_mode mode;
1756cb66 1338 enum reg_class rclass, aclass;
3b6d1699 1339 ira_allocno_t another_allocno, from = NULL;
a7f32992
VM
1340 ira_copy_t cp, next_cp;
1341
f754734f 1342 rclass = REGNO_REG_CLASS (hard_regno);
f754734f 1343 do
a7f32992 1344 {
f754734f 1345 mode = ALLOCNO_MODE (allocno);
1756cb66 1346 ira_init_register_move_cost_if_necessary (mode);
f754734f 1347 for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
a7f32992 1348 {
f754734f 1349 if (cp->first == allocno)
a7f32992 1350 {
f754734f
RS
1351 next_cp = cp->next_first_allocno_copy;
1352 another_allocno = cp->second;
1353 }
1354 else if (cp->second == allocno)
1355 {
1356 next_cp = cp->next_second_allocno_copy;
1357 another_allocno = cp->first;
a7f32992 1358 }
f754734f
RS
1359 else
1360 gcc_unreachable ();
1361
3b6d1699
VM
1362 if (another_allocno == from)
1363 continue;
1364
1756cb66
VM
1365 aclass = ALLOCNO_CLASS (another_allocno);
1366 if (! TEST_HARD_REG_BIT (reg_class_contents[aclass],
6042d1dd 1367 hard_regno)
f754734f
RS
1368 || ALLOCNO_ASSIGNED_P (another_allocno))
1369 continue;
1370
b3ad445f
RS
1371 /* If we have different modes use the smallest one. It is
1372 a sub-register move. It is hard to predict what LRA
1373 will reload (the pseudo or its sub-register) but LRA
1374 will try to minimize the data movement. Also for some
1375 register classes bigger modes might be invalid,
1376 e.g. DImode for AREG on x86. For such cases the
1377 register move cost will be maximal. */
1378 mode = narrower_subreg_mode (mode, ALLOCNO_MODE (cp->second));
e2323a2b 1379
f754734f 1380 cost = (cp->second == allocno
1756cb66
VM
1381 ? ira_register_move_cost[mode][rclass][aclass]
1382 : ira_register_move_cost[mode][aclass][rclass]);
f754734f
RS
1383 if (decr_p)
1384 cost = -cost;
1385
86f0bef3
VM
1386 update_conflict_cost = update_cost = cp->freq * cost / divisor;
1387
1388 if (ALLOCNO_COLOR_DATA (another_allocno) != NULL
1389 && (ALLOCNO_COLOR_DATA (allocno)->first_thread_allocno
1390 != ALLOCNO_COLOR_DATA (another_allocno)->first_thread_allocno))
1391 /* Decrease conflict cost of ANOTHER_ALLOCNO if it is not
1392 in the same allocation thread. */
1393 update_conflict_cost /= COST_HOP_DIVISOR;
1394
f754734f
RS
1395 if (update_cost == 0)
1396 continue;
1397
86f0bef3
VM
1398 if (! update_allocno_cost (another_allocno, hard_regno,
1399 update_cost, update_conflict_cost))
1756cb66 1400 continue;
3b6d1699
VM
1401 queue_update_cost (another_allocno, allocno, divisor * COST_HOP_DIVISOR);
1402 if (record_p && ALLOCNO_COLOR_DATA (another_allocno) != NULL)
1403 ALLOCNO_COLOR_DATA (another_allocno)->update_cost_records
1404 = get_update_cost_record (hard_regno, divisor,
1405 ALLOCNO_COLOR_DATA (another_allocno)
1406 ->update_cost_records);
a7f32992 1407 }
a7f32992 1408 }
3b6d1699
VM
1409 while (get_next_update_cost (&allocno, &from, &divisor));
1410}
1411
1412/* Decrease preferred ALLOCNO hard register costs and costs of
1413 allocnos connected to ALLOCNO through copy. */
1414static void
1415update_costs_from_prefs (ira_allocno_t allocno)
1416{
1417 ira_pref_t pref;
1418
1419 start_update_cost ();
1420 for (pref = ALLOCNO_PREFS (allocno); pref != NULL; pref = pref->next_pref)
1421 update_costs_from_allocno (allocno, pref->hard_regno,
1422 COST_HOP_DIVISOR, true, true);
1423}
1424
1425/* Update (decrease if DECR_P) the cost of allocnos connected to
1426 ALLOCNO through copies to increase chances to remove some copies as
1427 the result of subsequent assignment. ALLOCNO was just assigned to
c73ccc80 1428 a hard register. Record cost updates if RECORD_P is true. */
3b6d1699 1429static void
c73ccc80 1430update_costs_from_copies (ira_allocno_t allocno, bool decr_p, bool record_p)
3b6d1699
VM
1431{
1432 int hard_regno;
1433
1434 hard_regno = ALLOCNO_HARD_REGNO (allocno);
1435 ira_assert (hard_regno >= 0 && ALLOCNO_CLASS (allocno) != NO_REGS);
1436 start_update_cost ();
c73ccc80 1437 update_costs_from_allocno (allocno, hard_regno, 1, decr_p, record_p);
3b6d1699
VM
1438}
1439
8c679205
VM
1440/* Update conflict_allocno_hard_prefs of allocnos conflicting with
1441 ALLOCNO. */
1442static void
1443update_conflict_allocno_hard_prefs (ira_allocno_t allocno)
1444{
1445 int l, nr = ALLOCNO_NUM_OBJECTS (allocno);
1446
1447 for (l = 0; l < nr; l++)
1448 {
1449 ira_object_t conflict_obj, obj = ALLOCNO_OBJECT (allocno, l);
1450 ira_object_conflict_iterator oci;
1451
1452 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
1453 {
1454 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
1455 allocno_color_data_t conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
1456 ira_pref_t pref;
1457
1458 if (!(hard_reg_set_intersect_p
1459 (ALLOCNO_COLOR_DATA (allocno)->profitable_hard_regs,
1460 conflict_data->profitable_hard_regs)))
1461 continue;
1462 for (pref = ALLOCNO_PREFS (allocno);
1463 pref != NULL;
1464 pref = pref->next_pref)
1465 conflict_data->conflict_allocno_hard_prefs += pref->freq;
1466 }
1467 }
1468}
1469
3b6d1699
VM
1470/* Restore costs of allocnos connected to ALLOCNO by copies as it was
1471 before updating costs of these allocnos from given allocno. This
1472 is a wise thing to do as if given allocno did not get an expected
1473 hard reg, using smaller cost of the hard reg for allocnos connected
1474 by copies to given allocno becomes actually misleading. Free all
1475 update cost records for ALLOCNO as we don't need them anymore. */
1476static void
1477restore_costs_from_copies (ira_allocno_t allocno)
1478{
1479 struct update_cost_record *records, *curr;
1480
1481 if (ALLOCNO_COLOR_DATA (allocno) == NULL)
1482 return;
1483 records = ALLOCNO_COLOR_DATA (allocno)->update_cost_records;
1484 start_update_cost ();
1485 for (curr = records; curr != NULL; curr = curr->next)
1486 update_costs_from_allocno (allocno, curr->hard_regno,
1487 curr->divisor, true, false);
1488 free_update_cost_record_list (records);
1489 ALLOCNO_COLOR_DATA (allocno)->update_cost_records = NULL;
f754734f
RS
1490}
1491
7db7ed3c 1492/* This function updates COSTS (decreasing them if DECR_P) for the
1756cb66 1493 hard registers of ACLASS by the conflict costs of the unassigned
7db7ed3c
VM
1494 allocnos connected by copies to the allocnos in update_cost_queue.
1495 This update increases the chances of removing some copies. */
f754734f 1496static void
1756cb66 1497update_conflict_hard_regno_costs (int *costs, enum reg_class aclass,
7db7ed3c 1498 bool decr_p)
f754734f
RS
1499{
1500 int i, cost, class_size, freq, mult, div, divisor;
7db7ed3c 1501 int index, hard_regno;
f754734f
RS
1502 int *conflict_costs;
1503 bool cont_p;
1756cb66 1504 enum reg_class another_aclass;
3b6d1699 1505 ira_allocno_t allocno, another_allocno, from;
f754734f
RS
1506 ira_copy_t cp, next_cp;
1507
3b6d1699 1508 while (get_next_update_cost (&allocno, &from, &divisor))
f754734f
RS
1509 for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
1510 {
1511 if (cp->first == allocno)
1512 {
1513 next_cp = cp->next_first_allocno_copy;
1514 another_allocno = cp->second;
1515 }
1516 else if (cp->second == allocno)
1517 {
1518 next_cp = cp->next_second_allocno_copy;
1519 another_allocno = cp->first;
1520 }
1521 else
1522 gcc_unreachable ();
3b6d1699
VM
1523
1524 if (another_allocno == from)
1525 continue;
1526
1756cb66
VM
1527 another_aclass = ALLOCNO_CLASS (another_allocno);
1528 if (! ira_reg_classes_intersect_p[aclass][another_aclass]
f754734f 1529 || ALLOCNO_ASSIGNED_P (another_allocno)
1756cb66 1530 || ALLOCNO_COLOR_DATA (another_allocno)->may_be_spilled_p)
f754734f 1531 continue;
1756cb66 1532 class_size = ira_class_hard_regs_num[another_aclass];
f754734f
RS
1533 ira_allocate_and_copy_costs
1534 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
1756cb66 1535 another_aclass, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
f754734f
RS
1536 conflict_costs
1537 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
1538 if (conflict_costs == NULL)
1539 cont_p = true;
1540 else
1541 {
1542 mult = cp->freq;
1543 freq = ALLOCNO_FREQ (another_allocno);
1544 if (freq == 0)
1545 freq = 1;
1546 div = freq * divisor;
1547 cont_p = false;
1548 for (i = class_size - 1; i >= 0; i--)
1549 {
1756cb66 1550 hard_regno = ira_class_hard_regs[another_aclass][i];
7db7ed3c 1551 ira_assert (hard_regno >= 0);
1756cb66 1552 index = ira_class_hard_reg_index[aclass][hard_regno];
7db7ed3c
VM
1553 if (index < 0)
1554 continue;
7879aabe 1555 cost = (int) (((int64_t) conflict_costs [i] * mult) / div);
f754734f
RS
1556 if (cost == 0)
1557 continue;
1558 cont_p = true;
1559 if (decr_p)
1560 cost = -cost;
7db7ed3c 1561 costs[index] += cost;
f754734f
RS
1562 }
1563 }
1564 /* Probably 5 hops will be enough. */
1565 if (cont_p
1566 && divisor <= (COST_HOP_DIVISOR
1567 * COST_HOP_DIVISOR
1568 * COST_HOP_DIVISOR
1569 * COST_HOP_DIVISOR))
3b6d1699 1570 queue_update_cost (another_allocno, allocno, divisor * COST_HOP_DIVISOR);
f754734f 1571 }
a7f32992
VM
1572}
1573
27508f5f
VM
1574/* Set up the conflicting hard regs (through CONFLICT_REGS) for each
1575 object of allocno A, and the starting profitable regs of the
1576 allocno (through START_PROFITABLE_REGS). Remember that the start
67914693 1577 profitable regs exclude hard regs which cannot hold a value of the
27508f5f
VM
1578 mode of allocno A. This mostly covers cases when a multi-register
1579 value needs to be aligned. */
1756cb66 1580static inline void
27508f5f
VM
1581get_conflict_and_start_profitable_regs (ira_allocno_t a, bool retry_p,
1582 HARD_REG_SET *conflict_regs,
1583 HARD_REG_SET *start_profitable_regs)
1756cb66
VM
1584{
1585 int i, nwords;
1586 ira_object_t obj;
1587
1588 nwords = ALLOCNO_NUM_OBJECTS (a);
1589 for (i = 0; i < nwords; i++)
1590 {
1591 obj = ALLOCNO_OBJECT (a, i);
1592 COPY_HARD_REG_SET (conflict_regs[i],
1593 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
1756cb66 1594 }
27508f5f
VM
1595 if (retry_p)
1596 {
1597 COPY_HARD_REG_SET (*start_profitable_regs,
1598 reg_class_contents[ALLOCNO_CLASS (a)]);
1599 AND_COMPL_HARD_REG_SET (*start_profitable_regs,
1600 ira_prohibited_class_mode_regs
1601 [ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
1602 }
1603 else
1604 COPY_HARD_REG_SET (*start_profitable_regs,
1605 ALLOCNO_COLOR_DATA (a)->profitable_hard_regs);
1756cb66
VM
1606}
1607
27508f5f
VM
1608/* Return true if HARD_REGNO is ok for assigning to allocno A with
1609 PROFITABLE_REGS and whose objects have CONFLICT_REGS. */
1756cb66
VM
1610static inline bool
1611check_hard_reg_p (ira_allocno_t a, int hard_regno,
27508f5f 1612 HARD_REG_SET *conflict_regs, HARD_REG_SET profitable_regs)
1756cb66
VM
1613{
1614 int j, nwords, nregs;
8d189b3f 1615 enum reg_class aclass;
ef4bddc2 1616 machine_mode mode;
1756cb66 1617
8d189b3f
VM
1618 aclass = ALLOCNO_CLASS (a);
1619 mode = ALLOCNO_MODE (a);
1620 if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[aclass][mode],
1621 hard_regno))
1622 return false;
27508f5f
VM
1623 /* Checking only profitable hard regs. */
1624 if (! TEST_HARD_REG_BIT (profitable_regs, hard_regno))
1625 return false;
ad474626 1626 nregs = hard_regno_nregs (hard_regno, mode);
1756cb66
VM
1627 nwords = ALLOCNO_NUM_OBJECTS (a);
1628 for (j = 0; j < nregs; j++)
1629 {
1630 int k;
1631 int set_to_test_start = 0, set_to_test_end = nwords;
1632
1633 if (nregs == nwords)
1634 {
2805e6c0 1635 if (REG_WORDS_BIG_ENDIAN)
1756cb66
VM
1636 set_to_test_start = nwords - j - 1;
1637 else
1638 set_to_test_start = j;
1639 set_to_test_end = set_to_test_start + 1;
1640 }
1641 for (k = set_to_test_start; k < set_to_test_end; k++)
27508f5f 1642 if (TEST_HARD_REG_BIT (conflict_regs[k], hard_regno + j))
1756cb66
VM
1643 break;
1644 if (k != set_to_test_end)
1645 break;
1646 }
1647 return j == nregs;
1648}
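
/* A minimal self-contained sketch (separate from the allocator code)
   of the word-to-register mapping checked above: when an allocno is
   split into as many objects as it needs hard registers, register
   HARD_REGNO + J is tested only against the conflict set of word J
   (mirrored when registers are numbered big-endian within a value);
   otherwise every word is tested.  Conflict sets are modelled as
   plain boolean arrays and all sizes are assumed example values.  */

#include <stdbool.h>
#include <stdio.h>

#define N_REGS 8

static bool
span_ok (bool conflict[][N_REGS], int nwords, int hard_regno, int nregs,
         bool words_big_endian)
{
  for (int j = 0; j < nregs; j++)
    {
      int start = 0, end = nwords;

      if (nregs == nwords)
        {
          start = words_big_endian ? nwords - j - 1 : j;
          end = start + 1;
        }
      for (int k = start; k < end; k++)
        if (conflict[k][hard_regno + j])
          return false;
    }
  return true;
}

int
main (void)
{
  bool conflict[2][N_REGS] = { { false } };

  conflict[1][3] = true;   /* Word 1 conflicts with hard reg 3.  */
  printf ("%d\n", span_ok (conflict, 2, 2, 2, false)); /* 0: reg 3 hits word 1.  */
  printf ("%d\n", span_ok (conflict, 2, 4, 2, false)); /* 1: regs 4 and 5 are free.  */
  return 0;
}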
9181a6e5
VM
1649
1650/* Return the number of registers needed to be saved and restored at
1651 function prologue/epilogue if we allocate HARD_REGNO to hold a value
1652 of MODE. */
1653static int
ef4bddc2 1654calculate_saved_nregs (int hard_regno, machine_mode mode)
9181a6e5
VM
1655{
1656 int i;
1657 int nregs = 0;
1658
1659 ira_assert (hard_regno >= 0);
ad474626 1660 for (i = hard_regno_nregs (hard_regno, mode) - 1; i >= 0; i--)
9181a6e5
VM
1661 if (!allocated_hardreg_p[hard_regno + i]
1662 && !TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + i)
1663 && !LOCAL_REGNO (hard_regno + i))
1664 nregs++;
1665 return nregs;
1666}
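
/* The count returned here feeds the cost adjustment in assign_hard_reg
   below: a candidate hard register that would force SAVED_NREGS extra
   save/restore pairs in the prologue/epilogue is penalized by about
   (store cost + load cost) * saved_nregs / nregs - 1.  As a worked
   example with assumed numbers (not taken from any target): if both
   memory move costs are 4, saved_nregs is 1 and the value occupies 2
   hard registers, the penalty is (4 + 4) * 1 / 2 - 1 = 3 cost units,
   which makes an already-used or call-clobbered register look
   comparatively cheaper.  */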
1756cb66 1667
22b0982c
VM
1668/* Choose a hard register for allocno A. If RETRY_P is TRUE, it means
1669 that the function is called from
1756cb66
VM
1670 `ira_reassign_conflict_allocnos' and `allocno_reload_assign'. In
1671 this case some allocno data are not defined or updated and we
1672 should not touch these data. The function returns true if we
1673 managed to assign a hard register to the allocno.
1674
1675 To assign a hard register, first of all we calculate all conflict
1676 hard registers which can come from conflicting allocnos with
1677 already assigned hard registers. After that we find the first free
1678 hard register with the minimal cost. During hard register cost
1679 calculation we take conflict hard register costs into account to
1680 give a chance for conflicting allocnos to get a better hard
1681 register in the future.
1682
1683 If the best hard register cost is bigger than cost of memory usage
1684 for the allocno, we don't assign a hard register to given allocno
1685 at all.
1686
1687 If we assign a hard register to the allocno, we update costs of the
1688 hard register for allocnos connected by copies to improve a chance
1689 to coalesce insns represented by the copies when we assign hard
1690 registers to the allocnos connected by the copies. */
058e97ec 1691static bool
22b0982c 1692assign_hard_reg (ira_allocno_t a, bool retry_p)
058e97ec 1693{
27508f5f 1694 HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
fbddb81d 1695 int i, j, hard_regno, best_hard_regno, class_size;
22b0982c 1696 int cost, mem_cost, min_cost, full_cost, min_full_cost, nwords, word;
058e97ec 1697 int *a_costs;
1756cb66 1698 enum reg_class aclass;
ef4bddc2 1699 machine_mode mode;
058e97ec 1700 static int costs[FIRST_PSEUDO_REGISTER], full_costs[FIRST_PSEUDO_REGISTER];
fbddb81d 1701 int saved_nregs;
a5c011cd
MP
1702 enum reg_class rclass;
1703 int add_cost;
058e97ec
VM
1704#ifdef STACK_REGS
1705 bool no_stack_reg_p;
1706#endif
1707
22b0982c 1708 ira_assert (! ALLOCNO_ASSIGNED_P (a));
27508f5f
VM
1709 get_conflict_and_start_profitable_regs (a, retry_p,
1710 conflicting_regs,
1711 &profitable_hard_regs);
1756cb66
VM
1712 aclass = ALLOCNO_CLASS (a);
1713 class_size = ira_class_hard_regs_num[aclass];
058e97ec
VM
1714 best_hard_regno = -1;
1715 memset (full_costs, 0, sizeof (int) * class_size);
1716 mem_cost = 0;
058e97ec
VM
1717 memset (costs, 0, sizeof (int) * class_size);
1718 memset (full_costs, 0, sizeof (int) * class_size);
1719#ifdef STACK_REGS
1720 no_stack_reg_p = false;
1721#endif
1756cb66
VM
1722 if (! retry_p)
1723 start_update_cost ();
22b0982c
VM
1724 mem_cost += ALLOCNO_UPDATED_MEMORY_COST (a);
1725
1726 ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
1756cb66 1727 aclass, ALLOCNO_HARD_REG_COSTS (a));
22b0982c 1728 a_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
058e97ec 1729#ifdef STACK_REGS
22b0982c 1730 no_stack_reg_p = no_stack_reg_p || ALLOCNO_TOTAL_NO_STACK_REG_P (a);
058e97ec 1731#endif
1756cb66 1732 cost = ALLOCNO_UPDATED_CLASS_COST (a);
22b0982c
VM
1733 for (i = 0; i < class_size; i++)
1734 if (a_costs != NULL)
1735 {
1736 costs[i] += a_costs[i];
1737 full_costs[i] += a_costs[i];
1738 }
1739 else
1740 {
1741 costs[i] += cost;
1742 full_costs[i] += cost;
1743 }
1756cb66 1744 nwords = ALLOCNO_NUM_OBJECTS (a);
27508f5f 1745 curr_allocno_process++;
22b0982c
VM
1746 for (word = 0; word < nwords; word++)
1747 {
1748 ira_object_t conflict_obj;
1749 ira_object_t obj = ALLOCNO_OBJECT (a, word);
1750 ira_object_conflict_iterator oci;
1751
22b0982c
VM
1752 /* Take preferences of conflicting allocnos into account. */
1753 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
1756cb66 1754 {
22b0982c 1755 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
1756cb66 1756 enum reg_class conflict_aclass;
4ef20c29 1757 allocno_color_data_t data = ALLOCNO_COLOR_DATA (conflict_a);
1756cb66 1758
22b0982c
VM
1759 /* Reload can give another class so we need to check all
1760 allocnos. */
1756cb66 1761 if (!retry_p
06fbce66
ZZ
1762 && ((!ALLOCNO_ASSIGNED_P (conflict_a)
1763 || ALLOCNO_HARD_REGNO (conflict_a) < 0)
1764 && !(hard_reg_set_intersect_p
1765 (profitable_hard_regs,
1766 ALLOCNO_COLOR_DATA
1767 (conflict_a)->profitable_hard_regs))))
1768 {
1769 /* All conflict allocnos are in consideration bitmap
1770 when retry_p is false. It might change in future and
1771 if it happens the assert will be broken. It means
1772 the code should be modified for the new
1773 assumptions. */
1774 ira_assert (bitmap_bit_p (consideration_allocno_bitmap,
1775 ALLOCNO_NUM (conflict_a)));
1776 continue;
1777 }
1756cb66 1778 conflict_aclass = ALLOCNO_CLASS (conflict_a);
22b0982c 1779 ira_assert (ira_reg_classes_intersect_p
1756cb66 1780 [aclass][conflict_aclass]);
22b0982c 1781 if (ALLOCNO_ASSIGNED_P (conflict_a))
fa86d337 1782 {
22b0982c
VM
1783 hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
1784 if (hard_regno >= 0
b8faca75
VM
1785 && (ira_hard_reg_set_intersection_p
1786 (hard_regno, ALLOCNO_MODE (conflict_a),
1787 reg_class_contents[aclass])))
fa86d337 1788 {
22b0982c 1789 int n_objects = ALLOCNO_NUM_OBJECTS (conflict_a);
4648deb4 1790 int conflict_nregs;
1756cb66 1791
4648deb4 1792 mode = ALLOCNO_MODE (conflict_a);
ad474626 1793 conflict_nregs = hard_regno_nregs (hard_regno, mode);
22b0982c 1794 if (conflict_nregs == n_objects && conflict_nregs > 1)
fa86d337 1795 {
22b0982c 1796 int num = OBJECT_SUBWORD (conflict_obj);
ac0ab4f7 1797
2805e6c0 1798 if (REG_WORDS_BIG_ENDIAN)
22b0982c
VM
1799 SET_HARD_REG_BIT (conflicting_regs[word],
1800 hard_regno + n_objects - num - 1);
1801 else
1802 SET_HARD_REG_BIT (conflicting_regs[word],
1803 hard_regno + num);
ac0ab4f7 1804 }
22b0982c
VM
1805 else
1806 IOR_HARD_REG_SET
1807 (conflicting_regs[word],
1808 ira_reg_mode_hard_regset[hard_regno][mode]);
27508f5f 1809 if (hard_reg_set_subset_p (profitable_hard_regs,
22b0982c
VM
1810 conflicting_regs[word]))
1811 goto fail;
fa86d337
BS
1812 }
1813 }
1756cb66 1814 else if (! retry_p
27508f5f
VM
1815 && ! ALLOCNO_COLOR_DATA (conflict_a)->may_be_spilled_p
1816 /* Don't process the conflict allocno twice. */
1817 && (ALLOCNO_COLOR_DATA (conflict_a)->last_process
1818 != curr_allocno_process))
22b0982c
VM
1819 {
1820 int k, *conflict_costs;
1821
27508f5f
VM
1822 ALLOCNO_COLOR_DATA (conflict_a)->last_process
1823 = curr_allocno_process;
22b0982c
VM
1824 ira_allocate_and_copy_costs
1825 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a),
1756cb66 1826 conflict_aclass,
22b0982c
VM
1827 ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_a));
1828 conflict_costs
1829 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a);
1830 if (conflict_costs != NULL)
1831 for (j = class_size - 1; j >= 0; j--)
1832 {
1756cb66 1833 hard_regno = ira_class_hard_regs[aclass][j];
22b0982c 1834 ira_assert (hard_regno >= 0);
1756cb66 1835 k = ira_class_hard_reg_index[conflict_aclass][hard_regno];
4ef20c29
ZC
1836 if (k < 0
1837 /* If HARD_REGNO is not available for CONFLICT_A,
1838 the conflict would be ignored, since HARD_REGNO
1839 will never be assigned to CONFLICT_A. */
1840 || !TEST_HARD_REG_BIT (data->profitable_hard_regs,
1841 hard_regno))
22b0982c
VM
1842 continue;
1843 full_costs[j] -= conflict_costs[k];
1844 }
3b6d1699
VM
1845 queue_update_cost (conflict_a, NULL, COST_HOP_DIVISOR);
1846
22b0982c 1847 }
fa86d337 1848 }
058e97ec 1849 }
1756cb66
VM
1850 if (! retry_p)
1851 /* Take into account preferences of allocnos connected by copies to
1852 the conflict allocnos. */
1853 update_conflict_hard_regno_costs (full_costs, aclass, true);
f754734f 1854
a7f32992
VM
1855 /* Take preferences of allocnos connected by copies into
1856 account. */
1756cb66
VM
1857 if (! retry_p)
1858 {
1859 start_update_cost ();
3b6d1699 1860 queue_update_cost (a, NULL, COST_HOP_DIVISOR);
1756cb66
VM
1861 update_conflict_hard_regno_costs (full_costs, aclass, false);
1862 }
058e97ec
VM
1863 min_cost = min_full_cost = INT_MAX;
1864 /* We don't care about giving callee-saved registers to allocnos not
1865 living through calls because call-clobbered registers are
1866 allocated first (it is the usual practice to put them first in
1867 REG_ALLOC_ORDER). */
1756cb66 1868 mode = ALLOCNO_MODE (a);
058e97ec
VM
1869 for (i = 0; i < class_size; i++)
1870 {
1756cb66 1871 hard_regno = ira_class_hard_regs[aclass][i];
058e97ec
VM
1872#ifdef STACK_REGS
1873 if (no_stack_reg_p
1874 && FIRST_STACK_REG <= hard_regno && hard_regno <= LAST_STACK_REG)
1875 continue;
1876#endif
1756cb66
VM
1877 if (! check_hard_reg_p (a, hard_regno,
1878 conflicting_regs, profitable_hard_regs))
058e97ec
VM
1879 continue;
1880 cost = costs[i];
1881 full_cost = full_costs[i];
ed15c598 1882 if (!HONOR_REG_ALLOC_ORDER)
058e97ec 1883 {
ed15c598
KC
1884 if ((saved_nregs = calculate_saved_nregs (hard_regno, mode)) != 0)
1885 /* We need to save/restore the hard register in
1886 epilogue/prologue. Therefore we increase the cost. */
1887 {
1888 rclass = REGNO_REG_CLASS (hard_regno);
1889 add_cost = ((ira_memory_move_cost[mode][rclass][0]
1890 + ira_memory_move_cost[mode][rclass][1])
ad474626
RS
1891 * saved_nregs / hard_regno_nregs (hard_regno,
1892 mode) - 1);
ed15c598
KC
1893 cost += add_cost;
1894 full_cost += add_cost;
1895 }
058e97ec
VM
1896 }
1897 if (min_cost > cost)
1898 min_cost = cost;
1899 if (min_full_cost > full_cost)
1900 {
1901 min_full_cost = full_cost;
1902 best_hard_regno = hard_regno;
1903 ira_assert (hard_regno >= 0);
1904 }
1905 }
b81a2f0d
VM
1906 if (min_full_cost > mem_cost
1907 /* Do not spill static chain pointer pseudo when non-local goto
1908 is used. */
1909 && ! non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a)))
058e97ec
VM
1910 {
1911 if (! retry_p && internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1912 fprintf (ira_dump_file, "(memory is more profitable %d vs %d) ",
1913 mem_cost, min_full_cost);
1914 best_hard_regno = -1;
1915 }
1916 fail:
058e97ec 1917 if (best_hard_regno >= 0)
9181a6e5 1918 {
ad474626 1919 for (i = hard_regno_nregs (best_hard_regno, mode) - 1; i >= 0; i--)
34672f15 1920 allocated_hardreg_p[best_hard_regno + i] = true;
9181a6e5 1921 }
c73ccc80
VM
1922 if (! retry_p)
1923 restore_costs_from_copies (a);
22b0982c
VM
1924 ALLOCNO_HARD_REGNO (a) = best_hard_regno;
1925 ALLOCNO_ASSIGNED_P (a) = true;
1926 if (best_hard_regno >= 0)
c73ccc80 1927 update_costs_from_copies (a, true, ! retry_p);
1756cb66 1928 ira_assert (ALLOCNO_CLASS (a) == aclass);
2b9c63a2 1929 /* We don't need updated costs anymore. */
22b0982c 1930 ira_free_allocno_updated_costs (a);
058e97ec
VM
1931 return best_hard_regno >= 0;
1932}
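
/* A minimal self-contained sketch (separate from the allocator code)
   of the final selection step above: among candidate registers that
   survive the conflict checks, take the one with the smallest
   accumulated cost, and fall back to memory when even that costs more
   than keeping the value in memory.  The cost arrays and the memory
   cost below are assumed example values.  */

#include <stdbool.h>
#include <stdio.h>

static int
pick_hard_reg (const int *full_costs, const bool *allowed, int n, int mem_cost)
{
  int best = -1, best_cost = 0;

  for (int i = 0; i < n; i++)
    if (allowed[i] && (best < 0 || full_costs[i] < best_cost))
      {
        best = i;
        best_cost = full_costs[i];
      }
  /* Memory wins when it is cheaper than the best register.  */
  if (best >= 0 && best_cost > mem_cost)
    best = -1;
  return best;
}

int
main (void)
{
  int full_costs[4] = { 12, 7, 30, 9 };      /* Made-up sample costs.  */
  bool allowed[4] = { true, false, true, true };

  printf ("%d\n", pick_hard_reg (full_costs, allowed, 4, 20));  /* 3 */
  printf ("%d\n", pick_hard_reg (full_costs, allowed, 4, 5));   /* -1: spill */
  return 0;
}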
1933
1934\f
1935
bf08fb16
VM
1936/* An array used to sort copies. */
1937static ira_copy_t *sorted_copies;
1938
0550a77b
VM
1939/* If allocno A is a cap, return non-cap allocno from which A is
1940 created. Otherwise, return A. */
1941static ira_allocno_t
1942get_cap_member (ira_allocno_t a)
1943{
1944 ira_allocno_t member;
1945
1946 while ((member = ALLOCNO_CAP_MEMBER (a)) != NULL)
1947 a = member;
1948 return a;
1949}
1950
bf08fb16
VM
1951/* Return TRUE if live ranges of allocnos A1 and A2 intersect. It is
1952 used to find a conflict for new allocnos or allocnos with
1953 different allocno classes. */
1954static bool
1955allocnos_conflict_by_live_ranges_p (ira_allocno_t a1, ira_allocno_t a2)
1956{
1957 rtx reg1, reg2;
1958 int i, j;
1959 int n1 = ALLOCNO_NUM_OBJECTS (a1);
1960 int n2 = ALLOCNO_NUM_OBJECTS (a2);
1961
1962 if (a1 == a2)
1963 return false;
1964 reg1 = regno_reg_rtx[ALLOCNO_REGNO (a1)];
1965 reg2 = regno_reg_rtx[ALLOCNO_REGNO (a2)];
1966 if (reg1 != NULL && reg2 != NULL
1967 && ORIGINAL_REGNO (reg1) == ORIGINAL_REGNO (reg2))
1968 return false;
1969
0550a77b
VM
1970 /* We don't keep live ranges for caps because they can be quite big.
1971 Use ranges of non-cap allocno from which caps are created. */
1972 a1 = get_cap_member (a1);
1973 a2 = get_cap_member (a2);
bf08fb16
VM
1974 for (i = 0; i < n1; i++)
1975 {
1976 ira_object_t c1 = ALLOCNO_OBJECT (a1, i);
1977
1978 for (j = 0; j < n2; j++)
1979 {
1980 ira_object_t c2 = ALLOCNO_OBJECT (a2, j);
1981
1982 if (ira_live_ranges_intersect_p (OBJECT_LIVE_RANGES (c1),
1983 OBJECT_LIVE_RANGES (c2)))
1984 return true;
1985 }
1986 }
1987 return false;
1988}
1989
1990/* The function is used to sort copies according to their execution
1991 frequencies. */
1992static int
1993copy_freq_compare_func (const void *v1p, const void *v2p)
1994{
1995 ira_copy_t cp1 = *(const ira_copy_t *) v1p, cp2 = *(const ira_copy_t *) v2p;
1996 int pri1, pri2;
1997
1998 pri1 = cp1->freq;
1999 pri2 = cp2->freq;
2000 if (pri2 - pri1)
2001 return pri2 - pri1;
2002
df3e3493 2003 /* If frequencies are equal, sort by copies, so that the results of
bf08fb16
VM
2004 qsort leave nothing to chance. */
2005 return cp1->num - cp2->num;
2006}
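
/* A minimal self-contained example (separate from the allocator code)
   of the same sorting discipline: order descending by frequency, then
   by a stable id, so that qsort -- which is not a stable sort --
   still produces a deterministic result.  The struct and the sample
   data are assumed for illustration.  */

#include <stdio.h>
#include <stdlib.h>

struct copy_ex { int num; int freq; };

static int
cmp_desc_freq (const void *p1, const void *p2)
{
  const struct copy_ex *c1 = p1, *c2 = p2;

  if (c2->freq != c1->freq)
    return c2->freq - c1->freq;
  return c1->num - c2->num;
}

int
main (void)
{
  struct copy_ex cps[] = { { 0, 10 }, { 1, 40 }, { 2, 10 } };

  qsort (cps, 3, sizeof cps[0], cmp_desc_freq);
  for (int i = 0; i < 3; i++)
    printf ("copy %d freq %d\n", cps[i].num, cps[i].freq);  /* 1, 0, 2 */
  return 0;
}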
2007
2008\f
2009
2010/* Return true if any allocno from thread of A1 conflicts with any
2011 allocno from thread A2. */
2012static bool
2013allocno_thread_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
2014{
2015 ira_allocno_t a, conflict_a;
2016
2017 for (a = ALLOCNO_COLOR_DATA (a2)->next_thread_allocno;;
2018 a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
2019 {
2020 for (conflict_a = ALLOCNO_COLOR_DATA (a1)->next_thread_allocno;;
2021 conflict_a = ALLOCNO_COLOR_DATA (conflict_a)->next_thread_allocno)
2022 {
2023 if (allocnos_conflict_by_live_ranges_p (a, conflict_a))
2024 return true;
2025 if (conflict_a == a1)
2026 break;
2027 }
2028 if (a == a2)
2029 break;
2030 }
2031 return false;
2032}
2033
2034/* Merge two threads given correspondingly by their first allocnos T1
2035 and T2 (more accurately merging T2 into T1). */
2036static void
2037merge_threads (ira_allocno_t t1, ira_allocno_t t2)
2038{
2039 ira_allocno_t a, next, last;
2040
2041 gcc_assert (t1 != t2
2042 && ALLOCNO_COLOR_DATA (t1)->first_thread_allocno == t1
2043 && ALLOCNO_COLOR_DATA (t2)->first_thread_allocno == t2);
2044 for (last = t2, a = ALLOCNO_COLOR_DATA (t2)->next_thread_allocno;;
2045 a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
2046 {
2047 ALLOCNO_COLOR_DATA (a)->first_thread_allocno = t1;
2048 if (a == t2)
2049 break;
2050 last = a;
2051 }
2052 next = ALLOCNO_COLOR_DATA (t1)->next_thread_allocno;
2053 ALLOCNO_COLOR_DATA (t1)->next_thread_allocno = t2;
2054 ALLOCNO_COLOR_DATA (last)->next_thread_allocno = next;
2055 ALLOCNO_COLOR_DATA (t1)->thread_freq += ALLOCNO_COLOR_DATA (t2)->thread_freq;
2056}
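
/* A minimal self-contained example (separate from the allocator code)
   of the splice above: each thread is a circular singly-linked ring,
   and merging relabels every member of the second ring with the first
   ring's head and then inserts the whole second ring right after that
   head.  Node payloads are plain ints assumed for illustration.  */

#include <stdio.h>

struct tnode { int id; struct tnode *next; struct tnode *head; };

static void
merge_rings (struct tnode *t1, struct tnode *t2)
{
  struct tnode *last = t2;

  /* Relabel the second ring and remember its last element.  */
  for (struct tnode *p = t2->next; ; p = p->next)
    {
      p->head = t1;
      if (p == t2)
        break;
      last = p;
    }
  /* Splice: t1 -> t2 ... last -> old successor of t1.  */
  last->next = t1->next;
  t1->next = t2;
}

int
main (void)
{
  struct tnode n1 = { 1, 0, 0 }, n2 = { 2, 0, 0 }, n3 = { 3, 0, 0 };

  n1.next = &n1; n1.head = &n1;          /* Ring A: 1.  */
  n2.next = &n3; n3.next = &n2;          /* Ring B: 2 -> 3 -> 2.  */
  n2.head = &n2; n3.head = &n2;
  merge_rings (&n1, &n2);
  for (struct tnode *p = &n1; ; p = p->next)
    {
      printf ("%d ", p->id);             /* Prints 1 2 3.  */
      if (p->next == &n1)
        break;
    }
  printf ("\n");
  return 0;
}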
2057
df3e3493 2058/* Create threads by processing CP_NUM copies from sorted copies. We
bf08fb16
VM
2059 process the most expensive copies first. */
2060static void
2061form_threads_from_copies (int cp_num)
2062{
2063 ira_allocno_t a, thread1, thread2;
2064 ira_copy_t cp;
2065 int i, n;
2066
2067 qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
2068 /* Form threads by processing copies, most frequently executed
2069 first. */
2070 for (; cp_num != 0;)
2071 {
2072 for (i = 0; i < cp_num; i++)
2073 {
2074 cp = sorted_copies[i];
2075 thread1 = ALLOCNO_COLOR_DATA (cp->first)->first_thread_allocno;
2076 thread2 = ALLOCNO_COLOR_DATA (cp->second)->first_thread_allocno;
2077 if (thread1 == thread2)
2078 continue;
2079 if (! allocno_thread_conflict_p (thread1, thread2))
2080 {
2081 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2082 fprintf
2083 (ira_dump_file,
2084 " Forming thread by copy %d:a%dr%d-a%dr%d (freq=%d):\n",
2085 cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
2086 ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
2087 cp->freq);
2088 merge_threads (thread1, thread2);
2089 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2090 {
2091 thread1 = ALLOCNO_COLOR_DATA (thread1)->first_thread_allocno;
2092 fprintf (ira_dump_file, " Result (freq=%d): a%dr%d(%d)",
2093 ALLOCNO_COLOR_DATA (thread1)->thread_freq,
2094 ALLOCNO_NUM (thread1), ALLOCNO_REGNO (thread1),
2095 ALLOCNO_FREQ (thread1));
2096 for (a = ALLOCNO_COLOR_DATA (thread1)->next_thread_allocno;
2097 a != thread1;
2098 a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
2099 fprintf (ira_dump_file, " a%dr%d(%d)",
2100 ALLOCNO_NUM (a), ALLOCNO_REGNO (a),
2101 ALLOCNO_FREQ (a));
2102 fprintf (ira_dump_file, "\n");
2103 }
2104 i++;
2105 break;
2106 }
2107 }
2108 /* Collect the rest of copies. */
2109 for (n = 0; i < cp_num; i++)
2110 {
2111 cp = sorted_copies[i];
2112 if (ALLOCNO_COLOR_DATA (cp->first)->first_thread_allocno
2113 != ALLOCNO_COLOR_DATA (cp->second)->first_thread_allocno)
2114 sorted_copies[n++] = cp;
2115 }
2116 cp_num = n;
2117 }
2118}
2119
2120/* Create threads by processing copies of all allocnos from BUCKET. We
2121 process the most expensive copies first. */
2122static void
2123form_threads_from_bucket (ira_allocno_t bucket)
2124{
2125 ira_allocno_t a;
2126 ira_copy_t cp, next_cp;
2127 int cp_num = 0;
2128
2129 for (a = bucket; a != NULL; a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2130 {
2131 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2132 {
2133 if (cp->first == a)
2134 {
2135 next_cp = cp->next_first_allocno_copy;
2136 sorted_copies[cp_num++] = cp;
2137 }
2138 else if (cp->second == a)
2139 next_cp = cp->next_second_allocno_copy;
2140 else
2141 gcc_unreachable ();
2142 }
2143 }
2144 form_threads_from_copies (cp_num);
2145}
2146
2147/* Create threads by processing copies of colorable allocno A. We
2148 process the most expensive copies first. */
2149static void
2150form_threads_from_colorable_allocno (ira_allocno_t a)
2151{
2152 ira_allocno_t another_a;
2153 ira_copy_t cp, next_cp;
2154 int cp_num = 0;
2155
2156 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2157 {
2158 if (cp->first == a)
2159 {
2160 next_cp = cp->next_first_allocno_copy;
2161 another_a = cp->second;
2162 }
2163 else if (cp->second == a)
2164 {
2165 next_cp = cp->next_second_allocno_copy;
2166 another_a = cp->first;
2167 }
2168 else
2169 gcc_unreachable ();
2170 if ((! ALLOCNO_COLOR_DATA (another_a)->in_graph_p
2171 && !ALLOCNO_COLOR_DATA (another_a)->may_be_spilled_p)
2172 || ALLOCNO_COLOR_DATA (another_a)->colorable_p)
2173 sorted_copies[cp_num++] = cp;
2174 }
2175 form_threads_from_copies (cp_num);
2176}
2177
2178/* Form initial threads which contain only one allocno. */
2179static void
2180init_allocno_threads (void)
2181{
2182 ira_allocno_t a;
2183 unsigned int j;
2184 bitmap_iterator bi;
2185
2186 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
2187 {
2188 a = ira_allocnos[j];
2189 /* Set up initial thread data: */
2190 ALLOCNO_COLOR_DATA (a)->first_thread_allocno
2191 = ALLOCNO_COLOR_DATA (a)->next_thread_allocno = a;
2192 ALLOCNO_COLOR_DATA (a)->thread_freq = ALLOCNO_FREQ (a);
2193 }
2194}
2195
2196\f
2197
058e97ec
VM
2198/* This page contains the allocator based on the Chaitin-Briggs algorithm. */
2199
2200/* Bucket of allocnos that can be colored currently without spilling. */
2201static ira_allocno_t colorable_allocno_bucket;
2202
2203/* Bucket of allocnos that might not be colorable currently without
2204 spilling. */
2205static ira_allocno_t uncolorable_allocno_bucket;
2206
1756cb66
VM
2207/* The current number of allocnos in the uncolorable_bucket. */
2208static int uncolorable_allocnos_num;
058e97ec 2209
30ea859e
VM
2210/* Return the current spill priority of allocno A. The smaller the
2211 number, the more preferable the allocno for spilling. */
1756cb66 2212static inline int
30ea859e
VM
2213allocno_spill_priority (ira_allocno_t a)
2214{
1756cb66
VM
2215 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
2216
2217 return (data->temp
2218 / (ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
2219 * ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
30ea859e
VM
2220 + 1));
2221}
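
/* Worked example with assumed numbers (they are not taken from any
   target): an allocno whose spill cost, stored in data->temp by the
   caller, is 600 and which lives through 9 excess-pressure points
   needing at most 2 hard registers gets priority
   600 / (9 * 2 + 1) = 31.  Another allocno with spill cost 100 over
   the same range gets 100 / 19 = 5 and, having the smaller value, is
   the preferred spill candidate.  */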
2222
1756cb66 2223/* Add allocno A to bucket *BUCKET_PTR. A should not be in a bucket
058e97ec
VM
2224 before the call. */
2225static void
1756cb66 2226add_allocno_to_bucket (ira_allocno_t a, ira_allocno_t *bucket_ptr)
058e97ec 2227{
1756cb66
VM
2228 ira_allocno_t first_a;
2229 allocno_color_data_t data;
058e97ec
VM
2230
2231 if (bucket_ptr == &uncolorable_allocno_bucket
1756cb66 2232 && ALLOCNO_CLASS (a) != NO_REGS)
058e97ec 2233 {
1756cb66
VM
2234 uncolorable_allocnos_num++;
2235 ira_assert (uncolorable_allocnos_num > 0);
058e97ec 2236 }
1756cb66
VM
2237 first_a = *bucket_ptr;
2238 data = ALLOCNO_COLOR_DATA (a);
2239 data->next_bucket_allocno = first_a;
2240 data->prev_bucket_allocno = NULL;
2241 if (first_a != NULL)
2242 ALLOCNO_COLOR_DATA (first_a)->prev_bucket_allocno = a;
2243 *bucket_ptr = a;
058e97ec
VM
2244}
2245
058e97ec
VM
2246/* Compare two allocnos to define which allocno should be pushed first
2247 into the coloring stack. If the return is a negative number, the
2248 allocno given by the first parameter will be pushed first. In this
2249 case such allocno has less priority than the second one and the
2250 hard register will be assigned to it after assignment to the second
2251 one. As a result of this assignment order, the second allocno
2252 has a better chance to get the best hard register. */
2253static int
2254bucket_allocno_compare_func (const void *v1p, const void *v2p)
2255{
2256 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2257 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
8c679205 2258 int diff, freq1, freq2, a1_num, a2_num, pref1, pref2;
bf08fb16
VM
2259 ira_allocno_t t1 = ALLOCNO_COLOR_DATA (a1)->first_thread_allocno;
2260 ira_allocno_t t2 = ALLOCNO_COLOR_DATA (a2)->first_thread_allocno;
9c3b0346
VM
2261 int cl1 = ALLOCNO_CLASS (a1), cl2 = ALLOCNO_CLASS (a2);
2262
bf08fb16
VM
2263 freq1 = ALLOCNO_COLOR_DATA (t1)->thread_freq;
2264 freq2 = ALLOCNO_COLOR_DATA (t2)->thread_freq;
2265 if ((diff = freq1 - freq2) != 0)
2266 return diff;
2267
2268 if ((diff = ALLOCNO_NUM (t2) - ALLOCNO_NUM (t1)) != 0)
2269 return diff;
2270
9c3b0346
VM
2271 /* Push pseudos requiring fewer hard registers first. It means that
2272 we will assign pseudos requiring more hard registers first,
2273 avoiding the creation of small holes in the free hard register file
67914693 2274 into which the pseudos requiring more hard registers cannot fit. */
9c3b0346
VM
2275 if ((diff = (ira_reg_class_max_nregs[cl1][ALLOCNO_MODE (a1)]
2276 - ira_reg_class_max_nregs[cl2][ALLOCNO_MODE (a2)])) != 0)
058e97ec 2277 return diff;
bf08fb16
VM
2278
2279 freq1 = ALLOCNO_FREQ (a1);
2280 freq2 = ALLOCNO_FREQ (a2);
2281 if ((diff = freq1 - freq2) != 0)
058e97ec 2282 return diff;
bf08fb16 2283
1756cb66
VM
2284 a1_num = ALLOCNO_COLOR_DATA (a1)->available_regs_num;
2285 a2_num = ALLOCNO_COLOR_DATA (a2)->available_regs_num;
2286 if ((diff = a2_num - a1_num) != 0)
99710245 2287 return diff;
8c679205
VM
2288 /* Push allocnos with minimal conflict_allocno_hard_prefs first. */
2289 pref1 = ALLOCNO_COLOR_DATA (a1)->conflict_allocno_hard_prefs;
2290 pref2 = ALLOCNO_COLOR_DATA (a2)->conflict_allocno_hard_prefs;
2291 if ((diff = pref1 - pref2) != 0)
2292 return diff;
058e97ec
VM
2293 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
2294}
2295
2296/* Sort bucket *BUCKET_PTR and return the result through
2297 BUCKET_PTR. */
2298static void
1756cb66
VM
2299sort_bucket (ira_allocno_t *bucket_ptr,
2300 int (*compare_func) (const void *, const void *))
058e97ec
VM
2301{
2302 ira_allocno_t a, head;
2303 int n;
2304
1756cb66
VM
2305 for (n = 0, a = *bucket_ptr;
2306 a != NULL;
2307 a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
058e97ec
VM
2308 sorted_allocnos[n++] = a;
2309 if (n <= 1)
2310 return;
1756cb66 2311 qsort (sorted_allocnos, n, sizeof (ira_allocno_t), compare_func);
058e97ec
VM
2312 head = NULL;
2313 for (n--; n >= 0; n--)
2314 {
2315 a = sorted_allocnos[n];
1756cb66
VM
2316 ALLOCNO_COLOR_DATA (a)->next_bucket_allocno = head;
2317 ALLOCNO_COLOR_DATA (a)->prev_bucket_allocno = NULL;
058e97ec 2318 if (head != NULL)
1756cb66 2319 ALLOCNO_COLOR_DATA (head)->prev_bucket_allocno = a;
058e97ec
VM
2320 head = a;
2321 }
2322 *bucket_ptr = head;
2323}
2324
bf08fb16 2325/* Add ALLOCNO to the colorable bucket, maintaining the order according
058e97ec
VM
2326 to their priority. ALLOCNO should not be in a bucket before the
2327 call. */
2328static void
bf08fb16 2329add_allocno_to_ordered_colorable_bucket (ira_allocno_t allocno)
058e97ec
VM
2330{
2331 ira_allocno_t before, after;
058e97ec 2332
bf08fb16
VM
2333 form_threads_from_colorable_allocno (allocno);
2334 for (before = colorable_allocno_bucket, after = NULL;
058e97ec 2335 before != NULL;
1756cb66
VM
2336 after = before,
2337 before = ALLOCNO_COLOR_DATA (before)->next_bucket_allocno)
058e97ec
VM
2338 if (bucket_allocno_compare_func (&allocno, &before) < 0)
2339 break;
1756cb66
VM
2340 ALLOCNO_COLOR_DATA (allocno)->next_bucket_allocno = before;
2341 ALLOCNO_COLOR_DATA (allocno)->prev_bucket_allocno = after;
058e97ec 2342 if (after == NULL)
bf08fb16 2343 colorable_allocno_bucket = allocno;
058e97ec 2344 else
1756cb66 2345 ALLOCNO_COLOR_DATA (after)->next_bucket_allocno = allocno;
058e97ec 2346 if (before != NULL)
1756cb66 2347 ALLOCNO_COLOR_DATA (before)->prev_bucket_allocno = allocno;
058e97ec
VM
2348}
2349
2350/* Delete ALLOCNO from bucket *BUCKET_PTR. It should be there before
2351 the call. */
2352static void
2353delete_allocno_from_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
2354{
2355 ira_allocno_t prev_allocno, next_allocno;
058e97ec
VM
2356
2357 if (bucket_ptr == &uncolorable_allocno_bucket
1756cb66 2358 && ALLOCNO_CLASS (allocno) != NO_REGS)
058e97ec 2359 {
1756cb66
VM
2360 uncolorable_allocnos_num--;
2361 ira_assert (uncolorable_allocnos_num >= 0);
058e97ec 2362 }
1756cb66
VM
2363 prev_allocno = ALLOCNO_COLOR_DATA (allocno)->prev_bucket_allocno;
2364 next_allocno = ALLOCNO_COLOR_DATA (allocno)->next_bucket_allocno;
058e97ec 2365 if (prev_allocno != NULL)
1756cb66 2366 ALLOCNO_COLOR_DATA (prev_allocno)->next_bucket_allocno = next_allocno;
058e97ec
VM
2367 else
2368 {
2369 ira_assert (*bucket_ptr == allocno);
2370 *bucket_ptr = next_allocno;
2371 }
2372 if (next_allocno != NULL)
1756cb66 2373 ALLOCNO_COLOR_DATA (next_allocno)->prev_bucket_allocno = prev_allocno;
058e97ec
VM
2374}
2375
22b0982c 2376/* Put allocno A onto the coloring stack without removing it from its
058e97ec
VM
2377 bucket. Pushing allocno to the coloring stack can result in moving
2378 conflicting allocnos from the uncolorable bucket to the colorable
8c679205
VM
2379 one. Update conflict_allocno_hard_prefs of the conflicting
2380 allocnos which are not on stack yet. */
058e97ec 2381static void
22b0982c 2382push_allocno_to_stack (ira_allocno_t a)
058e97ec 2383{
1756cb66
VM
2384 enum reg_class aclass;
2385 allocno_color_data_t data, conflict_data;
2386 int size, i, n = ALLOCNO_NUM_OBJECTS (a);
2387
2388 data = ALLOCNO_COLOR_DATA (a);
2389 data->in_graph_p = false;
9771b263 2390 allocno_stack_vec.safe_push (a);
1756cb66
VM
2391 aclass = ALLOCNO_CLASS (a);
2392 if (aclass == NO_REGS)
058e97ec 2393 return;
1756cb66
VM
2394 size = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
2395 if (n > 1)
ac0ab4f7
BS
2396 {
2397 /* We will deal with the subwords individually. */
22b0982c 2398 gcc_assert (size == ALLOCNO_NUM_OBJECTS (a));
ac0ab4f7
BS
2399 size = 1;
2400 }
22b0982c 2401 for (i = 0; i < n; i++)
058e97ec 2402 {
22b0982c 2403 ira_object_t obj = ALLOCNO_OBJECT (a, i);
22b0982c
VM
2404 ira_object_t conflict_obj;
2405 ira_object_conflict_iterator oci;
2406
2407 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
548a6322 2408 {
22b0982c 2409 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
8c679205
VM
2410 ira_pref_t pref;
2411
1756cb66 2412 conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
8c679205 2413 if (! conflict_data->in_graph_p
1756cb66
VM
2414 || ALLOCNO_ASSIGNED_P (conflict_a)
2415 || !(hard_reg_set_intersect_p
27508f5f
VM
2416 (ALLOCNO_COLOR_DATA (a)->profitable_hard_regs,
2417 conflict_data->profitable_hard_regs)))
22b0982c 2418 continue;
8c679205
VM
2419 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
2420 conflict_data->conflict_allocno_hard_prefs -= pref->freq;
2421 if (conflict_data->colorable_p)
2422 continue;
1756cb66
VM
2423 ira_assert (bitmap_bit_p (coloring_allocno_bitmap,
2424 ALLOCNO_NUM (conflict_a)));
27508f5f 2425 if (update_left_conflict_sizes_p (conflict_a, a, size))
22b0982c
VM
2426 {
2427 delete_allocno_from_bucket
27508f5f 2428 (conflict_a, &uncolorable_allocno_bucket);
bf08fb16 2429 add_allocno_to_ordered_colorable_bucket (conflict_a);
1756cb66
VM
2430 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2431 {
2432 fprintf (ira_dump_file, " Making");
2433 ira_print_expanded_allocno (conflict_a);
2434 fprintf (ira_dump_file, " colorable\n");
2435 }
548a6322 2436 }
1756cb66 2437
548a6322 2438 }
058e97ec
VM
2439 }
2440}
2441
2442/* Put ALLOCNO onto the coloring stack and remove it from its bucket.
2443 The allocno is in the colorable bucket if COLORABLE_P is TRUE. */
2444static void
2445remove_allocno_from_bucket_and_push (ira_allocno_t allocno, bool colorable_p)
2446{
058e97ec
VM
2447 if (colorable_p)
2448 delete_allocno_from_bucket (allocno, &colorable_allocno_bucket);
2449 else
2450 delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
2451 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2452 {
2453 fprintf (ira_dump_file, " Pushing");
22b0982c 2454 ira_print_expanded_allocno (allocno);
30ea859e 2455 if (colorable_p)
1756cb66
VM
2456 fprintf (ira_dump_file, "(cost %d)\n",
2457 ALLOCNO_COLOR_DATA (allocno)->temp);
30ea859e
VM
2458 else
2459 fprintf (ira_dump_file, "(potential spill: %spri=%d, cost=%d)\n",
2460 ALLOCNO_BAD_SPILL_P (allocno) ? "bad spill, " : "",
1756cb66
VM
2461 allocno_spill_priority (allocno),
2462 ALLOCNO_COLOR_DATA (allocno)->temp);
2463 }
058e97ec 2464 if (! colorable_p)
1756cb66 2465 ALLOCNO_COLOR_DATA (allocno)->may_be_spilled_p = true;
548a6322 2466 push_allocno_to_stack (allocno);
058e97ec
VM
2467}
2468
2469/* Put all allocnos from colorable bucket onto the coloring stack. */
2470static void
2471push_only_colorable (void)
2472{
bf08fb16 2473 form_threads_from_bucket (colorable_allocno_bucket);
1756cb66 2474 sort_bucket (&colorable_allocno_bucket, bucket_allocno_compare_func);
058e97ec
VM
2475 for (;colorable_allocno_bucket != NULL;)
2476 remove_allocno_from_bucket_and_push (colorable_allocno_bucket, true);
2477}
2478
058e97ec 2479/* Return the frequency of exit edges (if EXIT_P) or entry from/to the
b8698a0f 2480 loop given by its LOOP_NODE. */
058e97ec
VM
2481int
2482ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
2483{
2484 int freq, i;
2485 edge_iterator ei;
2486 edge e;
9771b263 2487 vec<edge> edges;
058e97ec 2488
2608d841 2489 ira_assert (current_loops != NULL && loop_node->loop != NULL
058e97ec
VM
2490 && (regno < 0 || regno >= FIRST_PSEUDO_REGISTER));
2491 freq = 0;
2492 if (! exit_p)
2493 {
2494 FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
2495 if (e->src != loop_node->loop->latch
2496 && (regno < 0
bf744527
SB
2497 || (bitmap_bit_p (df_get_live_out (e->src), regno)
2498 && bitmap_bit_p (df_get_live_in (e->dest), regno))))
058e97ec
VM
2499 freq += EDGE_FREQUENCY (e);
2500 }
2501 else
2502 {
2503 edges = get_loop_exit_edges (loop_node->loop);
9771b263 2504 FOR_EACH_VEC_ELT (edges, i, e)
058e97ec 2505 if (regno < 0
bf744527
SB
2506 || (bitmap_bit_p (df_get_live_out (e->src), regno)
2507 && bitmap_bit_p (df_get_live_in (e->dest), regno)))
058e97ec 2508 freq += EDGE_FREQUENCY (e);
9771b263 2509 edges.release ();
058e97ec
VM
2510 }
2511
2512 return REG_FREQ_FROM_EDGE_FREQ (freq);
2513}
2514
2515/* Calculate and return the cost of putting allocno A into memory. */
2516static int
2517calculate_allocno_spill_cost (ira_allocno_t a)
2518{
2519 int regno, cost;
ef4bddc2 2520 machine_mode mode;
058e97ec
VM
2521 enum reg_class rclass;
2522 ira_allocno_t parent_allocno;
2523 ira_loop_tree_node_t parent_node, loop_node;
2524
2525 regno = ALLOCNO_REGNO (a);
1756cb66 2526 cost = ALLOCNO_UPDATED_MEMORY_COST (a) - ALLOCNO_UPDATED_CLASS_COST (a);
058e97ec
VM
2527 if (ALLOCNO_CAP (a) != NULL)
2528 return cost;
2529 loop_node = ALLOCNO_LOOP_TREE_NODE (a);
2530 if ((parent_node = loop_node->parent) == NULL)
2531 return cost;
2532 if ((parent_allocno = parent_node->regno_allocno_map[regno]) == NULL)
2533 return cost;
2534 mode = ALLOCNO_MODE (a);
1756cb66 2535 rclass = ALLOCNO_CLASS (a);
058e97ec
VM
2536 if (ALLOCNO_HARD_REGNO (parent_allocno) < 0)
2537 cost -= (ira_memory_move_cost[mode][rclass][0]
2538 * ira_loop_edge_freq (loop_node, regno, true)
2539 + ira_memory_move_cost[mode][rclass][1]
2540 * ira_loop_edge_freq (loop_node, regno, false));
2541 else
1756cb66
VM
2542 {
2543 ira_init_register_move_cost_if_necessary (mode);
2544 cost += ((ira_memory_move_cost[mode][rclass][1]
2545 * ira_loop_edge_freq (loop_node, regno, true)
2546 + ira_memory_move_cost[mode][rclass][0]
2547 * ira_loop_edge_freq (loop_node, regno, false))
2548 - (ira_register_move_cost[mode][rclass][rclass]
2549 * (ira_loop_edge_freq (loop_node, regno, false)
2550 + ira_loop_edge_freq (loop_node, regno, true))));
2551 }
058e97ec
VM
2552 return cost;
2553}
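
/* Worked example with assumed numbers: suppose spilling the allocno
   inside its loop costs memory_cost - class_cost = 500 locally.  If
   the same pseudo is already spilled in the parent loop node, the
   memory accesses that would otherwise be needed on the loop's entry
   and exit edges disappear, so their frequency-weighted cost (say a
   move cost of 4 times an entry frequency of 100 plus 4 times an exit
   frequency of 100, i.e. 800) is subtracted and the allocno becomes a
   much better spill candidate.  If the parent allocno got a hard
   register instead, spilling here adds those boundary loads and
   stores and saves only the cheaper register-to-register moves, so
   the returned cost goes up.  All concrete numbers are assumed for
   illustration.  */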
2554
1756cb66
VM
2555/* Used for sorting allocnos for spilling. */
2556static inline int
2557allocno_spill_priority_compare (ira_allocno_t a1, ira_allocno_t a2)
058e97ec
VM
2558{
2559 int pri1, pri2, diff;
b8698a0f 2560
b81a2f0d
VM
2561 /* Avoid spilling static chain pointer pseudo when non-local goto is
2562 used. */
2563 if (non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a1)))
2564 return 1;
2565 else if (non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a2)))
2566 return -1;
1756cb66
VM
2567 if (ALLOCNO_BAD_SPILL_P (a1) && ! ALLOCNO_BAD_SPILL_P (a2))
2568 return 1;
2569 if (ALLOCNO_BAD_SPILL_P (a2) && ! ALLOCNO_BAD_SPILL_P (a1))
2570 return -1;
2571 pri1 = allocno_spill_priority (a1);
2572 pri2 = allocno_spill_priority (a2);
058e97ec
VM
2573 if ((diff = pri1 - pri2) != 0)
2574 return diff;
1756cb66
VM
2575 if ((diff
2576 = ALLOCNO_COLOR_DATA (a1)->temp - ALLOCNO_COLOR_DATA (a2)->temp) != 0)
058e97ec
VM
2577 return diff;
2578 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2579}
2580
1756cb66
VM
2581/* Used for sorting allocnos for spilling. */
2582static int
2583allocno_spill_sort_compare (const void *v1p, const void *v2p)
99710245 2584{
1756cb66
VM
2585 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2586 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
99710245 2587
1756cb66 2588 return allocno_spill_priority_compare (p1, p2);
058e97ec
VM
2589}
2590
2591/* Push allocnos to the coloring stack. The order of allocnos in the
1756cb66
VM
2592 stack defines the order for the subsequent coloring. */
2593static void
2594push_allocnos_to_stack (void)
2595{
2596 ira_allocno_t a;
2597 int cost;
2598
2599 /* Calculate uncolorable allocno spill costs. */
2600 for (a = uncolorable_allocno_bucket;
2601 a != NULL;
2602 a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2603 if (ALLOCNO_CLASS (a) != NO_REGS)
2604 {
2605 cost = calculate_allocno_spill_cost (a);
2606 /* ??? Remove cost of copies between the coalesced
2607 allocnos. */
2608 ALLOCNO_COLOR_DATA (a)->temp = cost;
2609 }
2610 sort_bucket (&uncolorable_allocno_bucket, allocno_spill_sort_compare);
2611 for (;;)
2612 {
2613 push_only_colorable ();
2614 a = uncolorable_allocno_bucket;
2615 if (a == NULL)
2616 break;
2617 remove_allocno_from_bucket_and_push (a, false);
058e97ec
VM
2618 }
2619 ira_assert (colorable_allocno_bucket == NULL
2620 && uncolorable_allocno_bucket == NULL);
1756cb66 2621 ira_assert (uncolorable_allocnos_num == 0);
058e97ec
VM
2622}
2623
2624/* Pop the coloring stack and assign hard registers to the popped
2625 allocnos. */
2626static void
2627pop_allocnos_from_stack (void)
2628{
2629 ira_allocno_t allocno;
1756cb66 2630 enum reg_class aclass;
058e97ec 2631
9771b263 2632 for (;allocno_stack_vec.length () != 0;)
058e97ec 2633 {
9771b263 2634 allocno = allocno_stack_vec.pop ();
1756cb66 2635 aclass = ALLOCNO_CLASS (allocno);
058e97ec
VM
2636 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2637 {
2638 fprintf (ira_dump_file, " Popping");
22b0982c 2639 ira_print_expanded_allocno (allocno);
058e97ec
VM
2640 fprintf (ira_dump_file, " -- ");
2641 }
1756cb66 2642 if (aclass == NO_REGS)
058e97ec
VM
2643 {
2644 ALLOCNO_HARD_REGNO (allocno) = -1;
2645 ALLOCNO_ASSIGNED_P (allocno) = true;
2646 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (allocno) == NULL);
2647 ira_assert
2648 (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno) == NULL);
2649 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2650 fprintf (ira_dump_file, "assign memory\n");
2651 }
2652 else if (assign_hard_reg (allocno, false))
2653 {
2654 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2655 fprintf (ira_dump_file, "assign reg %d\n",
2656 ALLOCNO_HARD_REGNO (allocno));
2657 }
2658 else if (ALLOCNO_ASSIGNED_P (allocno))
2659 {
2660 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3b6d1699
VM
2661 fprintf (ira_dump_file, "spill%s\n",
2662 ALLOCNO_COLOR_DATA (allocno)->may_be_spilled_p
2663 ? "" : "!");
058e97ec 2664 }
1756cb66 2665 ALLOCNO_COLOR_DATA (allocno)->in_graph_p = true;
ac0ab4f7
BS
2666 }
2667}
2668
22b0982c 2669/* Set up number of available hard registers for allocno A. */
058e97ec 2670static void
22b0982c 2671setup_allocno_available_regs_num (ira_allocno_t a)
058e97ec 2672{
27508f5f 2673 int i, n, hard_regno, hard_regs_num, nwords;
1756cb66 2674 enum reg_class aclass;
1756cb66 2675 allocno_color_data_t data;
058e97ec 2676
1756cb66
VM
2677 aclass = ALLOCNO_CLASS (a);
2678 data = ALLOCNO_COLOR_DATA (a);
2679 data->available_regs_num = 0;
2680 if (aclass == NO_REGS)
058e97ec 2681 return;
1756cb66 2682 hard_regs_num = ira_class_hard_regs_num[aclass];
1756cb66 2683 nwords = ALLOCNO_NUM_OBJECTS (a);
058e97ec 2684 for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
478ab26d 2685 {
1756cb66 2686 hard_regno = ira_class_hard_regs[aclass][i];
27508f5f
VM
2687 /* Checking only profitable hard regs. */
2688 if (TEST_HARD_REG_BIT (data->profitable_hard_regs, hard_regno))
478ab26d
VM
2689 n++;
2690 }
1756cb66
VM
2691 data->available_regs_num = n;
2692 if (internal_flag_ira_verbose <= 2 || ira_dump_file == NULL)
2693 return;
2694 fprintf
2695 (ira_dump_file,
27508f5f 2696 " Allocno a%dr%d of %s(%d) has %d avail. regs ",
1756cb66
VM
2697 ALLOCNO_NUM (a), ALLOCNO_REGNO (a),
2698 reg_class_names[aclass], ira_class_hard_regs_num[aclass], n);
27508f5f
VM
2699 print_hard_reg_set (ira_dump_file, data->profitable_hard_regs, false);
2700 fprintf (ira_dump_file, ", %snode: ",
2701 hard_reg_set_equal_p (data->profitable_hard_regs,
2702 data->hard_regs_node->hard_regs->set)
2703 ? "" : "^");
2704 print_hard_reg_set (ira_dump_file,
2705 data->hard_regs_node->hard_regs->set, false);
1756cb66 2706 for (i = 0; i < nwords; i++)
22b0982c 2707 {
1756cb66 2708 ira_object_t obj = ALLOCNO_OBJECT (a, i);
ac0ab4f7 2709
1756cb66 2710 if (nwords != 1)
22b0982c 2711 {
1756cb66
VM
2712 if (i != 0)
2713 fprintf (ira_dump_file, ", ");
2714 fprintf (ira_dump_file, " obj %d", i);
22b0982c 2715 }
1756cb66
VM
2716 fprintf (ira_dump_file, " (confl regs = ");
2717 print_hard_reg_set (ira_dump_file, OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
2718 false);
27508f5f 2719 fprintf (ira_dump_file, ")");
22b0982c 2720 }
1756cb66 2721 fprintf (ira_dump_file, "\n");
058e97ec
VM
2722}
2723
2724/* Put ALLOCNO in a bucket corresponding to the number and size of its
2725 conflicting allocnos and hard registers. */
2726static void
2727put_allocno_into_bucket (ira_allocno_t allocno)
2728{
1756cb66 2729 ALLOCNO_COLOR_DATA (allocno)->in_graph_p = true;
058e97ec 2730 setup_allocno_available_regs_num (allocno);
1756cb66 2731 if (setup_left_conflict_sizes_p (allocno))
548a6322 2732 add_allocno_to_bucket (allocno, &colorable_allocno_bucket);
058e97ec 2733 else
548a6322 2734 add_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
058e97ec
VM
2735}
2736
22b0982c
VM
2737/* Map: allocno number -> allocno priority. */
2738static int *allocno_priorities;
058e97ec 2739
22b0982c
VM
2740/* Set up priorities for N allocnos in array
2741 CONSIDERATION_ALLOCNOS. */
058e97ec 2742static void
22b0982c 2743setup_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
058e97ec 2744{
22b0982c
VM
2745 int i, length, nrefs, priority, max_priority, mult;
2746 ira_allocno_t a;
058e97ec 2747
22b0982c
VM
2748 max_priority = 0;
2749 for (i = 0; i < n; i++)
7db7ed3c
VM
2750 {
2751 a = consideration_allocnos[i];
2752 nrefs = ALLOCNO_NREFS (a);
2753 ira_assert (nrefs >= 0);
2754 mult = floor_log2 (ALLOCNO_NREFS (a)) + 1;
2755 ira_assert (mult >= 0);
2756 allocno_priorities[ALLOCNO_NUM (a)]
2757 = priority
2758 = (mult
1756cb66
VM
2759 * (ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a))
2760 * ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
7db7ed3c
VM
2761 if (priority < 0)
2762 priority = -priority;
2763 if (max_priority < priority)
2764 max_priority = priority;
2765 }
2766 mult = max_priority == 0 ? 1 : INT_MAX / max_priority;
2767 for (i = 0; i < n; i++)
2768 {
2769 a = consideration_allocnos[i];
2770 length = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
ac0ab4f7
BS
2771 if (ALLOCNO_NUM_OBJECTS (a) > 1)
2772 length /= ALLOCNO_NUM_OBJECTS (a);
7db7ed3c
VM
2773 if (length <= 0)
2774 length = 1;
2775 allocno_priorities[ALLOCNO_NUM (a)]
2776 = allocno_priorities[ALLOCNO_NUM (a)] * mult / length;
2777 }
2778}
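
/* Worked example with assumed numbers: a pseudo with 10 references
   gets mult = floor_log2 (10) + 1 = 4; with a memory-versus-class
   cost difference of 200 and at most 2 hard registers needed, its raw
   priority is 4 * 200 * 2 = 1600.  The second pass then rescales all
   priorities by INT_MAX / max_priority and divides each by the
   allocno's live-range length (the number of excess pressure points),
   so a short, frequently used pseudo ends up ahead of a long-lived
   one with the same raw priority.  */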
2779
1756cb66
VM
2780/* Sort allocnos according to the profit of using a hard register
2781 instead of memory for them. */
2782static int
2783allocno_cost_compare_func (const void *v1p, const void *v2p)
2784{
2785 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2786 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
2787 int c1, c2;
2788
2789 c1 = ALLOCNO_UPDATED_MEMORY_COST (p1) - ALLOCNO_UPDATED_CLASS_COST (p1);
2790 c2 = ALLOCNO_UPDATED_MEMORY_COST (p2) - ALLOCNO_UPDATED_CLASS_COST (p2);
2791 if (c1 - c2)
2792 return c1 - c2;
2793
2794 /* If regs are equally good, sort by allocno numbers, so that the
2795 results of qsort leave nothing to chance. */
2796 return ALLOCNO_NUM (p1) - ALLOCNO_NUM (p2);
2797}
2798
da178d56
VM
2799/* Return savings on removed copies when ALLOCNO is assigned to
2800 HARD_REGNO. */
2801static int
2802allocno_copy_cost_saving (ira_allocno_t allocno, int hard_regno)
2803{
2804 int cost = 0;
b8506a8a 2805 machine_mode allocno_mode = ALLOCNO_MODE (allocno);
da178d56
VM
2806 enum reg_class rclass;
2807 ira_copy_t cp, next_cp;
2808
2809 rclass = REGNO_REG_CLASS (hard_regno);
c4b1942c
VM
2810 if (ira_reg_class_max_nregs[rclass][allocno_mode]
2811 > ira_class_hard_regs_num[rclass])
2812 /* For the above condition the cost can be wrong. Use the allocno
2813 class in this case. */
2814 rclass = ALLOCNO_CLASS (allocno);
da178d56
VM
2815 for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
2816 {
2817 if (cp->first == allocno)
2818 {
2819 next_cp = cp->next_first_allocno_copy;
2820 if (ALLOCNO_HARD_REGNO (cp->second) != hard_regno)
2821 continue;
2822 }
2823 else if (cp->second == allocno)
2824 {
2825 next_cp = cp->next_second_allocno_copy;
2826 if (ALLOCNO_HARD_REGNO (cp->first) != hard_regno)
2827 continue;
2828 }
2829 else
2830 gcc_unreachable ();
c4b1942c 2831 cost += cp->freq * ira_register_move_cost[allocno_mode][rclass][rclass];
da178d56
VM
2832 }
2833 return cost;
2834}
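
/* Worked example with assumed numbers: if two copies whose other
   sides already sit in HARD_REGNO have frequencies 40 and 10, and a
   register-to-register move in this class costs 2, the saving is
   (40 + 10) * 2 = 100 cost units; improve_allocation below subtracts
   this from the candidate register's cost, so choices that let copies
   be coalesced away look cheaper.  */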
2835
1756cb66
VM
2836/* We used Chaitin-Briggs coloring to assign as many pseudos as
2837 possible to hard registers. Let us try to improve the allocation
2838 from a cost point of view. This function improves the allocation by
2839 spilling some allocnos and assigning the freed hard registers to
2840 other allocnos if it decreases the overall allocation cost. */
2841static void
2842improve_allocation (void)
2843{
2844 unsigned int i;
2845 int j, k, n, hregno, conflict_hregno, base_cost, class_size, word, nwords;
2846 int check, spill_cost, min_cost, nregs, conflict_nregs, r, best;
2847 bool try_p;
2848 enum reg_class aclass;
ef4bddc2 2849 machine_mode mode;
1756cb66
VM
2850 int *allocno_costs;
2851 int costs[FIRST_PSEUDO_REGISTER];
27508f5f 2852 HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
1756cb66
VM
2853 ira_allocno_t a;
2854 bitmap_iterator bi;
2855
b81a2f0d
VM
2856 /* Don't bother to optimize the code with static chain pointer and
2857 non-local goto in order not to spill the chain pointer
2858 pseudo. */
2859 if (cfun->static_chain_decl && crtl->has_nonlocal_goto)
2860 return;
1756cb66
VM
2861 /* Clear counts used to process conflicting allocnos only once for
2862 each allocno. */
2863 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
2864 ALLOCNO_COLOR_DATA (ira_allocnos[i])->temp = 0;
2865 check = n = 0;
2866 /* Process each allocno and try to assign a hard register to it by
2867 spilling some its conflicting allocnos. */
2868 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
2869 {
2870 a = ira_allocnos[i];
2871 ALLOCNO_COLOR_DATA (a)->temp = 0;
2872 if (empty_profitable_hard_regs (a))
2873 continue;
2874 check++;
2875 aclass = ALLOCNO_CLASS (a);
da178d56 2876 allocno_costs = ALLOCNO_HARD_REG_COSTS (a);
1756cb66
VM
2877 if ((hregno = ALLOCNO_HARD_REGNO (a)) < 0)
2878 base_cost = ALLOCNO_UPDATED_MEMORY_COST (a);
2879 else if (allocno_costs == NULL)
2880 /* It means that assigning a hard register is not profitable
2881 (we don't waste memory for hard register costs in this
2882 case). */
2883 continue;
2884 else
da178d56
VM
2885 base_cost = (allocno_costs[ira_class_hard_reg_index[aclass][hregno]]
2886 - allocno_copy_cost_saving (a, hregno));
1756cb66 2887 try_p = false;
27508f5f
VM
2888 get_conflict_and_start_profitable_regs (a, false,
2889 conflicting_regs,
2890 &profitable_hard_regs);
1756cb66
VM
2891 class_size = ira_class_hard_regs_num[aclass];
2892 /* Set up cost improvement for usage of each profitable hard
2893 register for allocno A. */
2894 for (j = 0; j < class_size; j++)
2895 {
2896 hregno = ira_class_hard_regs[aclass][j];
2897 if (! check_hard_reg_p (a, hregno,
2898 conflicting_regs, profitable_hard_regs))
2899 continue;
2900 ira_assert (ira_class_hard_reg_index[aclass][hregno] == j);
2901 k = allocno_costs == NULL ? 0 : j;
2902 costs[hregno] = (allocno_costs == NULL
2903 ? ALLOCNO_UPDATED_CLASS_COST (a) : allocno_costs[k]);
da178d56 2904 costs[hregno] -= allocno_copy_cost_saving (a, hregno);
1756cb66
VM
2905 costs[hregno] -= base_cost;
2906 if (costs[hregno] < 0)
2907 try_p = true;
2908 }
2909 if (! try_p)
2910 /* There is no chance to improve the allocation cost by
2911 assigning hard register to allocno A even without spilling
2912 conflicting allocnos. */
2913 continue;
2914 mode = ALLOCNO_MODE (a);
2915 nwords = ALLOCNO_NUM_OBJECTS (a);
2916 /* Process each allocno conflicting with A and update the cost
2917 improvement for profitable hard registers of A. To use a
2918 hard register for A we need to spill some conflicting
2919 allocnos and that creates penalty for the cost
2920 improvement. */
2921 for (word = 0; word < nwords; word++)
2922 {
2923 ira_object_t conflict_obj;
2924 ira_object_t obj = ALLOCNO_OBJECT (a, word);
2925 ira_object_conflict_iterator oci;
2926
2927 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2928 {
2929 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2930
2931 if (ALLOCNO_COLOR_DATA (conflict_a)->temp == check)
2932 /* We already processed this conflicting allocno
2933 because we processed earlier another object of the
2934 conflicting allocno. */
2935 continue;
2936 ALLOCNO_COLOR_DATA (conflict_a)->temp = check;
2937 if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
2938 continue;
2939 spill_cost = ALLOCNO_UPDATED_MEMORY_COST (conflict_a);
2940 k = (ira_class_hard_reg_index
2941 [ALLOCNO_CLASS (conflict_a)][conflict_hregno]);
2942 ira_assert (k >= 0);
da178d56 2943 if ((allocno_costs = ALLOCNO_HARD_REG_COSTS (conflict_a))
1756cb66
VM
2944 != NULL)
2945 spill_cost -= allocno_costs[k];
1756cb66
VM
2946 else
2947 spill_cost -= ALLOCNO_UPDATED_CLASS_COST (conflict_a);
da178d56
VM
2948 spill_cost
2949 += allocno_copy_cost_saving (conflict_a, conflict_hregno);
ad474626
RS
2950 conflict_nregs = hard_regno_nregs (conflict_hregno,
2951 ALLOCNO_MODE (conflict_a));
1756cb66 2952 for (r = conflict_hregno;
4edd6298 2953 r >= 0 && (int) end_hard_regno (mode, r) > conflict_hregno;
1756cb66
VM
2954 r--)
2955 if (check_hard_reg_p (a, r,
2956 conflicting_regs, profitable_hard_regs))
2957 costs[r] += spill_cost;
2958 for (r = conflict_hregno + 1;
2959 r < conflict_hregno + conflict_nregs;
2960 r++)
2961 if (check_hard_reg_p (a, r,
2962 conflicting_regs, profitable_hard_regs))
2963 costs[r] += spill_cost;
2964 }
2965 }
2966 min_cost = INT_MAX;
2967 best = -1;
2968 /* Now we choose the hard register for A which results in the highest
2969 allocation cost improvement. */
2970 for (j = 0; j < class_size; j++)
2971 {
2972 hregno = ira_class_hard_regs[aclass][j];
2973 if (check_hard_reg_p (a, hregno,
2974 conflicting_regs, profitable_hard_regs)
2975 && min_cost > costs[hregno])
2976 {
2977 best = hregno;
2978 min_cost = costs[hregno];
2979 }
2980 }
2981 if (min_cost >= 0)
2982 /* We are in a situation when assigning any hard register to A
2983 by spilling some conflicting allocnos does not improve the
2984 allocation cost. */
2985 continue;
ad474626 2986 nregs = hard_regno_nregs (best, mode);
1756cb66
VM
2987 /* Now spill conflicting allocnos which contain a hard register
2988 of A when we assign the best chosen hard register to it. */
2989 for (word = 0; word < nwords; word++)
2990 {
2991 ira_object_t conflict_obj;
2992 ira_object_t obj = ALLOCNO_OBJECT (a, word);
2993 ira_object_conflict_iterator oci;
2994
2995 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2996 {
2997 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2998
2999 if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
3000 continue;
ad474626
RS
3001 conflict_nregs = hard_regno_nregs (conflict_hregno,
3002 ALLOCNO_MODE (conflict_a));
1756cb66
VM
3003 if (best + nregs <= conflict_hregno
3004 || conflict_hregno + conflict_nregs <= best)
3005 /* No intersection. */
3006 continue;
3007 ALLOCNO_HARD_REGNO (conflict_a) = -1;
3008 sorted_allocnos[n++] = conflict_a;
3009 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
3010 fprintf (ira_dump_file, "Spilling a%dr%d for a%dr%d\n",
3011 ALLOCNO_NUM (conflict_a), ALLOCNO_REGNO (conflict_a),
3012 ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
3013 }
3014 }
3015 /* Assign the best chosen hard register to A. */
3016 ALLOCNO_HARD_REGNO (a) = best;
3017 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
3018 fprintf (ira_dump_file, "Assigning %d to a%dr%d\n",
3019 best, ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
3020 }
3021 if (n == 0)
3022 return;
3023 /* We spilled some allocnos to assign their hard registers to other
3024 allocnos. The spilled allocnos are now in array
3025 'sorted_allocnos'. There is still a possibility that some of the
3026 spilled allocnos can get hard registers. So let us try assign
3027 them hard registers again (just a reminder -- function
3028 'assign_hard_reg' assigns hard registers only if it is possible
 3029 and profitable). We process the spilled allocnos with the biggest
 3030 benefit of getting a hard register first -- see function
3031 'allocno_cost_compare_func'. */
3032 qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
3033 allocno_cost_compare_func);
3034 for (j = 0; j < n; j++)
3035 {
3036 a = sorted_allocnos[j];
3037 ALLOCNO_ASSIGNED_P (a) = false;
3038 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3039 {
3040 fprintf (ira_dump_file, " ");
3041 ira_print_expanded_allocno (a);
3042 fprintf (ira_dump_file, " -- ");
3043 }
3044 if (assign_hard_reg (a, false))
3045 {
3046 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3047 fprintf (ira_dump_file, "assign hard reg %d\n",
3048 ALLOCNO_HARD_REGNO (a));
3049 }
3050 else
3051 {
3052 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3053 fprintf (ira_dump_file, "assign memory\n");
3054 }
3055 }
3056}
3057
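/* Editor's note: an illustrative, standalone sketch (not part of
   ira-color.c; the numbers below are invented) of the selection rule used
   by improve_allocation () above: for every candidate hard register we
   compute the net change in allocation cost if A takes it (spill costs of
   the evicted conflicting allocnos minus A's own gain), pick the most
   negative entry, and give up when no entry is negative.  */
#include <stdio.h>
#include <limits.h>

#define N_REGS 5

int
main (void)
{
  /* Net cost change per candidate hard register; negative = improvement.  */
  int costs[N_REGS] = { 7, -3, 0, -9, 4 };
  int best = -1, min_cost = INT_MAX;

  for (int r = 0; r < N_REGS; r++)
    if (costs[r] < min_cost)
      {
        best = r;
        min_cost = costs[r];
      }

  if (min_cost >= 0)
    printf ("no hard register improves the allocation; keep A in memory\n");
  else
    printf ("assign hard register %d (improvement %d)\n", best, -min_cost);
  return 0;
}
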
aeb9f7cf 3058/* Sort allocnos according to their priorities. */
7db7ed3c
VM
3059static int
3060allocno_priority_compare_func (const void *v1p, const void *v2p)
3061{
3062 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
3063 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
158ec018 3064 int pri1, pri2, diff;
7db7ed3c 3065
b81a2f0d
VM
3066 /* Assign hard reg to static chain pointer pseudo first when
3067 non-local goto is used. */
158ec018
AM
3068 if ((diff = (non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a2))
3069 - non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a1)))) != 0)
3070 return diff;
7db7ed3c
VM
3071 pri1 = allocno_priorities[ALLOCNO_NUM (a1)];
3072 pri2 = allocno_priorities[ALLOCNO_NUM (a2)];
71af27d2
OH
3073 if (pri2 != pri1)
3074 return SORTGT (pri2, pri1);
7db7ed3c
VM
3075
3076 /* If regs are equally good, sort by allocnos, so that the results of
3077 qsort leave nothing to chance. */
3078 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
3079}
3080
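/* Editor's note: a minimal standalone sketch (invented item/priority names,
   not GCC data structures) of the qsort comparator pattern used by
   allocno_priority_compare_func () above: compare the primary key
   (priority, descending) and fall back to a unique index so that the
   unstable qsort still produces a deterministic order.  */
#include <stdio.h>
#include <stdlib.h>

struct item { int num; int priority; };

static int
item_priority_compare (const void *v1p, const void *v2p)
{
  const struct item *i1 = (const struct item *) v1p;
  const struct item *i2 = (const struct item *) v2p;

  /* Higher priority first.  */
  if (i1->priority != i2->priority)
    return i2->priority > i1->priority ? 1 : -1;
  /* Equal priorities: sort by the unique number, so the result of qsort
     leaves nothing to chance.  */
  return i1->num - i2->num;
}

int
main (void)
{
  struct item items[] = { {0, 5}, {1, 9}, {2, 5}, {3, 1} };
  qsort (items, 4, sizeof (struct item), item_priority_compare);
  for (int i = 0; i < 4; i++)
    printf ("num=%d priority=%d\n", items[i].num, items[i].priority);
  return 0;
}
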
058e97ec
VM
3081/* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
3082 taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP. */
3083static void
3084color_allocnos (void)
3085{
7db7ed3c 3086 unsigned int i, n;
058e97ec
VM
3087 bitmap_iterator bi;
3088 ira_allocno_t a;
3089
76763a6d 3090 setup_profitable_hard_regs ();
3b6d1699
VM
3091 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
3092 {
3b6d1699
VM
3093 allocno_color_data_t data;
3094 ira_pref_t pref, next_pref;
3095
3096 a = ira_allocnos[i];
3b6d1699 3097 data = ALLOCNO_COLOR_DATA (a);
8c679205 3098 data->conflict_allocno_hard_prefs = 0;
3b6d1699
VM
3099 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
3100 {
3101 next_pref = pref->next_pref;
3102 if (! ira_hard_reg_in_set_p (pref->hard_regno,
3103 ALLOCNO_MODE (a),
3104 data->profitable_hard_regs))
3105 ira_remove_pref (pref);
3106 }
3107 }
8c679205 3108
7db7ed3c 3109 if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
058e97ec 3110 {
7db7ed3c
VM
3111 n = 0;
3112 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
058e97ec 3113 {
7db7ed3c 3114 a = ira_allocnos[i];
1756cb66 3115 if (ALLOCNO_CLASS (a) == NO_REGS)
058e97ec 3116 {
7db7ed3c
VM
3117 ALLOCNO_HARD_REGNO (a) = -1;
3118 ALLOCNO_ASSIGNED_P (a) = true;
3119 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
3120 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
3121 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3122 {
3123 fprintf (ira_dump_file, " Spill");
22b0982c 3124 ira_print_expanded_allocno (a);
7db7ed3c
VM
3125 fprintf (ira_dump_file, "\n");
3126 }
3127 continue;
058e97ec 3128 }
7db7ed3c
VM
3129 sorted_allocnos[n++] = a;
3130 }
3131 if (n != 0)
3132 {
3133 setup_allocno_priorities (sorted_allocnos, n);
3134 qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
3135 allocno_priority_compare_func);
3136 for (i = 0; i < n; i++)
3137 {
3138 a = sorted_allocnos[i];
3139 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3140 {
3141 fprintf (ira_dump_file, " ");
22b0982c 3142 ira_print_expanded_allocno (a);
7db7ed3c
VM
3143 fprintf (ira_dump_file, " -- ");
3144 }
3145 if (assign_hard_reg (a, false))
3146 {
3147 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3148 fprintf (ira_dump_file, "assign hard reg %d\n",
3149 ALLOCNO_HARD_REGNO (a));
3150 }
3151 else
3152 {
3153 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3154 fprintf (ira_dump_file, "assign memory\n");
3155 }
3156 }
3157 }
3158 }
3159 else
3160 {
27508f5f 3161 form_allocno_hard_regs_nodes_forest ();
1756cb66
VM
3162 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
3163 print_hard_regs_forest (ira_dump_file);
7db7ed3c
VM
3164 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
3165 {
3166 a = ira_allocnos[i];
1756cb66 3167 if (ALLOCNO_CLASS (a) != NO_REGS && ! empty_profitable_hard_regs (a))
3b6d1699
VM
3168 {
3169 ALLOCNO_COLOR_DATA (a)->in_graph_p = true;
3170 update_costs_from_prefs (a);
8c679205 3171 update_conflict_allocno_hard_prefs (a);
3b6d1699 3172 }
1756cb66 3173 else
7db7ed3c
VM
3174 {
3175 ALLOCNO_HARD_REGNO (a) = -1;
3176 ALLOCNO_ASSIGNED_P (a) = true;
1756cb66
VM
3177 /* We don't need updated costs anymore. */
3178 ira_free_allocno_updated_costs (a);
7db7ed3c
VM
3179 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3180 {
3181 fprintf (ira_dump_file, " Spill");
22b0982c 3182 ira_print_expanded_allocno (a);
7db7ed3c
VM
3183 fprintf (ira_dump_file, "\n");
3184 }
7db7ed3c 3185 }
1756cb66
VM
3186 }
3187 /* Put the allocnos into the corresponding buckets. */
3188 colorable_allocno_bucket = NULL;
3189 uncolorable_allocno_bucket = NULL;
3190 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
3191 {
3192 a = ira_allocnos[i];
3193 if (ALLOCNO_COLOR_DATA (a)->in_graph_p)
3194 put_allocno_into_bucket (a);
058e97ec 3195 }
7db7ed3c
VM
3196 push_allocnos_to_stack ();
3197 pop_allocnos_from_stack ();
27508f5f 3198 finish_allocno_hard_regs_nodes_forest ();
058e97ec 3199 }
1756cb66 3200 improve_allocation ();
058e97ec
VM
3201}
3202
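/* Editor's note: a simplified, standalone sketch of the classic
   Chaitin-Briggs discipline that the non-priority branch of
   color_allocnos () follows via push_allocnos_to_stack () and
   pop_allocnos_from_stack ().  This is not GCC's implementation; the graph,
   the node count and the number of colors K are an invented toy example.
   Push trivially colorable nodes (remaining degree < K), push a spill
   candidate when none qualifies, then pop and give each node the first
   color unused by its already-colored neighbors.  */
#include <stdio.h>
#include <stdbool.h>

#define N 5   /* number of nodes (allocnos) in the toy conflict graph  */
#define K 2   /* number of available colors (hard registers)           */

static bool conflict[N][N];   /* symmetric interference matrix  */

static void
add_conflict (int a, int b)
{
  conflict[a][b] = conflict[b][a] = true;
}

int
main (void)
{
  int stack[N], sp = 0, color[N], degree[N] = { 0 };
  bool on_stack[N] = { false };

  /* A small interference graph: 0-1, 1-2, 2-3, 3-0, 0-2.  */
  add_conflict (0, 1); add_conflict (1, 2); add_conflict (2, 3);
  add_conflict (3, 0); add_conflict (0, 2);

  for (int i = 0; i < N; i++)
    {
      color[i] = -1;
      for (int j = 0; j < N; j++)
        if (conflict[i][j])
          degree[i]++;
    }

  /* Push phase: prefer nodes whose remaining degree is < K; otherwise
     optimistically push the node with the largest remaining degree.  */
  while (sp < N)
    {
      int pick = -1;
      for (int i = 0; i < N; i++)
        if (!on_stack[i] && degree[i] < K)
          { pick = i; break; }
      if (pick < 0)
        for (int i = 0; i < N; i++)
          if (!on_stack[i] && (pick < 0 || degree[i] > degree[pick]))
            pick = i;
      on_stack[pick] = true;
      stack[sp++] = pick;
      for (int i = 0; i < N; i++)
        if (conflict[pick][i] && !on_stack[i])
          degree[i]--;
    }

  /* Pop phase: assign the first color not used by the already-colored
     neighbors; a node that finds no free color stays spilled (-1).  */
  while (sp > 0)
    {
      int v = stack[--sp];
      bool used[K] = { false };
      for (int i = 0; i < N; i++)
        if (conflict[v][i] && !on_stack[i] && color[i] >= 0)
          used[color[i]] = true;
      for (int c = 0; c < K; c++)
        if (!used[c])
          { color[v] = c; break; }
      on_stack[v] = false;   /* v is now colored or spilled  */
      if (color[v] < 0)
        printf ("node %d -> spilled\n", v);
      else
        printf ("node %d -> color %d\n", v, color[v]);
    }
  return 0;
}
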
3203\f
3204
2b9c63a2 3205/* Output information about the loop given by its LOOP_TREE_NODE. */
058e97ec
VM
3206static void
3207print_loop_title (ira_loop_tree_node_t loop_tree_node)
3208{
3209 unsigned int j;
3210 bitmap_iterator bi;
ea1c67e6
VM
3211 ira_loop_tree_node_t subloop_node, dest_loop_node;
3212 edge e;
3213 edge_iterator ei;
058e97ec 3214
2608d841
VM
3215 if (loop_tree_node->parent == NULL)
3216 fprintf (ira_dump_file,
3217 "\n Loop 0 (parent -1, header bb%d, depth 0)\n bbs:",
3218 NUM_FIXED_BLOCKS);
3219 else
3220 {
3221 ira_assert (current_loops != NULL && loop_tree_node->loop != NULL);
3222 fprintf (ira_dump_file,
3223 "\n Loop %d (parent %d, header bb%d, depth %d)\n bbs:",
3224 loop_tree_node->loop_num, loop_tree_node->parent->loop_num,
3225 loop_tree_node->loop->header->index,
3226 loop_depth (loop_tree_node->loop));
3227 }
ea1c67e6
VM
3228 for (subloop_node = loop_tree_node->children;
3229 subloop_node != NULL;
3230 subloop_node = subloop_node->next)
3231 if (subloop_node->bb != NULL)
3232 {
3233 fprintf (ira_dump_file, " %d", subloop_node->bb->index);
3234 FOR_EACH_EDGE (e, ei, subloop_node->bb->succs)
fefa31b5 3235 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
ea1c67e6
VM
3236 && ((dest_loop_node = IRA_BB_NODE (e->dest)->parent)
3237 != loop_tree_node))
3238 fprintf (ira_dump_file, "(->%d:l%d)",
2608d841 3239 e->dest->index, dest_loop_node->loop_num);
ea1c67e6
VM
3240 }
3241 fprintf (ira_dump_file, "\n all:");
49d988e7 3242 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
058e97ec
VM
3243 fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
3244 fprintf (ira_dump_file, "\n modified regnos:");
3245 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->modified_regnos, 0, j, bi)
3246 fprintf (ira_dump_file, " %d", j);
3247 fprintf (ira_dump_file, "\n border:");
3248 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->border_allocnos, 0, j, bi)
3249 fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
3250 fprintf (ira_dump_file, "\n Pressure:");
1756cb66 3251 for (j = 0; (int) j < ira_pressure_classes_num; j++)
058e97ec 3252 {
1756cb66 3253 enum reg_class pclass;
b8698a0f 3254
1756cb66
VM
3255 pclass = ira_pressure_classes[j];
3256 if (loop_tree_node->reg_pressure[pclass] == 0)
058e97ec 3257 continue;
1756cb66
VM
3258 fprintf (ira_dump_file, " %s=%d", reg_class_names[pclass],
3259 loop_tree_node->reg_pressure[pclass]);
058e97ec
VM
3260 }
3261 fprintf (ira_dump_file, "\n");
3262}
3263
 3264/* Color the allocnos inside the loop (in the extreme case it can be all
 3265 of the function) given the corresponding LOOP_TREE_NODE. The
 3266 function is called for each loop during a top-down traversal of the
 3267 loop tree. */
3268static void
3269color_pass (ira_loop_tree_node_t loop_tree_node)
3270{
27508f5f 3271 int regno, hard_regno, index = -1, n;
058e97ec
VM
3272 int cost, exit_freq, enter_freq;
3273 unsigned int j;
3274 bitmap_iterator bi;
ef4bddc2 3275 machine_mode mode;
1756cb66 3276 enum reg_class rclass, aclass, pclass;
058e97ec
VM
3277 ira_allocno_t a, subloop_allocno;
3278 ira_loop_tree_node_t subloop_node;
3279
3280 ira_assert (loop_tree_node->bb == NULL);
3281 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
3282 print_loop_title (loop_tree_node);
3283
49d988e7 3284 bitmap_copy (coloring_allocno_bitmap, loop_tree_node->all_allocnos);
058e97ec 3285 bitmap_copy (consideration_allocno_bitmap, coloring_allocno_bitmap);
27508f5f 3286 n = 0;
1756cb66
VM
3287 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
3288 {
3289 a = ira_allocnos[j];
3290 n++;
1756cb66
VM
3291 if (! ALLOCNO_ASSIGNED_P (a))
3292 continue;
3293 bitmap_clear_bit (coloring_allocno_bitmap, ALLOCNO_NUM (a));
3294 }
3295 allocno_color_data
3296 = (allocno_color_data_t) ira_allocate (sizeof (struct allocno_color_data)
3297 * n);
3298 memset (allocno_color_data, 0, sizeof (struct allocno_color_data) * n);
27508f5f
VM
3299 curr_allocno_process = 0;
3300 n = 0;
058e97ec
VM
3301 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
3302 {
3303 a = ira_allocnos[j];
1756cb66
VM
3304 ALLOCNO_ADD_DATA (a) = allocno_color_data + n;
3305 n++;
058e97ec 3306 }
bf08fb16 3307 init_allocno_threads ();
058e97ec
VM
3308 /* Color all mentioned allocnos including transparent ones. */
3309 color_allocnos ();
3310 /* Process caps. They are processed just once. */
7db7ed3c
VM
3311 if (flag_ira_region == IRA_REGION_MIXED
3312 || flag_ira_region == IRA_REGION_ALL)
49d988e7 3313 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
058e97ec
VM
3314 {
3315 a = ira_allocnos[j];
3316 if (ALLOCNO_CAP_MEMBER (a) == NULL)
3317 continue;
3318 /* Remove from processing in the next loop. */
3319 bitmap_clear_bit (consideration_allocno_bitmap, j);
1756cb66
VM
3320 rclass = ALLOCNO_CLASS (a);
3321 pclass = ira_pressure_class_translate[rclass];
7db7ed3c 3322 if (flag_ira_region == IRA_REGION_MIXED
1756cb66 3323 && (loop_tree_node->reg_pressure[pclass]
f508f827 3324 <= ira_class_hard_regs_num[pclass]))
058e97ec
VM
3325 {
3326 mode = ALLOCNO_MODE (a);
3327 hard_regno = ALLOCNO_HARD_REGNO (a);
3328 if (hard_regno >= 0)
3329 {
3330 index = ira_class_hard_reg_index[rclass][hard_regno];
3331 ira_assert (index >= 0);
3332 }
3333 regno = ALLOCNO_REGNO (a);
3334 subloop_allocno = ALLOCNO_CAP_MEMBER (a);
3335 subloop_node = ALLOCNO_LOOP_TREE_NODE (subloop_allocno);
3336 ira_assert (!ALLOCNO_ASSIGNED_P (subloop_allocno));
3337 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
3338 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
3339 if (hard_regno >= 0)
c73ccc80 3340 update_costs_from_copies (subloop_allocno, true, true);
2b9c63a2 3341 /* We don't need updated costs anymore. */
058e97ec
VM
3342 ira_free_allocno_updated_costs (subloop_allocno);
3343 }
3344 }
3345 /* Update costs of the corresponding allocnos (not caps) in the
3346 subloops. */
3347 for (subloop_node = loop_tree_node->subloops;
3348 subloop_node != NULL;
3349 subloop_node = subloop_node->subloop_next)
3350 {
3351 ira_assert (subloop_node->bb == NULL);
3352 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
3353 {
3354 a = ira_allocnos[j];
3355 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
3356 mode = ALLOCNO_MODE (a);
1756cb66
VM
3357 rclass = ALLOCNO_CLASS (a);
3358 pclass = ira_pressure_class_translate[rclass];
058e97ec 3359 hard_regno = ALLOCNO_HARD_REGNO (a);
7db7ed3c 3360 /* Use hard register class here. ??? */
058e97ec
VM
3361 if (hard_regno >= 0)
3362 {
3363 index = ira_class_hard_reg_index[rclass][hard_regno];
3364 ira_assert (index >= 0);
3365 }
3366 regno = ALLOCNO_REGNO (a);
3367 /* ??? conflict costs */
3368 subloop_allocno = subloop_node->regno_allocno_map[regno];
3369 if (subloop_allocno == NULL
3370 || ALLOCNO_CAP (subloop_allocno) != NULL)
3371 continue;
1756cb66 3372 ira_assert (ALLOCNO_CLASS (subloop_allocno) == rclass);
49d988e7
VM
3373 ira_assert (bitmap_bit_p (subloop_node->all_allocnos,
3374 ALLOCNO_NUM (subloop_allocno)));
bcb21886
KY
3375 if ((flag_ira_region == IRA_REGION_MIXED
3376 && (loop_tree_node->reg_pressure[pclass]
3377 <= ira_class_hard_regs_num[pclass]))
3378 || (pic_offset_table_rtx != NULL
3c20c9bc
VM
3379 && regno == (int) REGNO (pic_offset_table_rtx))
3380 /* Avoid overlapped multi-registers. Moves between them
3381 might result in wrong code generation. */
3382 || (hard_regno >= 0
3383 && ira_reg_class_max_nregs[pclass][mode] > 1))
058e97ec
VM
3384 {
3385 if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
3386 {
3387 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
3388 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
3389 if (hard_regno >= 0)
c73ccc80 3390 update_costs_from_copies (subloop_allocno, true, true);
2b9c63a2 3391 /* We don't need updated costs anymore. */
058e97ec
VM
3392 ira_free_allocno_updated_costs (subloop_allocno);
3393 }
3394 continue;
3395 }
3396 exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
3397 enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
3398 ira_assert (regno < ira_reg_equiv_len);
55a2c322 3399 if (ira_equiv_no_lvalue_p (regno))
058e97ec
VM
3400 {
3401 if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
3402 {
3403 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
3404 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
3405 if (hard_regno >= 0)
c73ccc80 3406 update_costs_from_copies (subloop_allocno, true, true);
2b9c63a2 3407 /* We don't need updated costs anymore. */
058e97ec
VM
3408 ira_free_allocno_updated_costs (subloop_allocno);
3409 }
3410 }
3411 else if (hard_regno < 0)
3412 {
3413 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
3414 -= ((ira_memory_move_cost[mode][rclass][1] * enter_freq)
3415 + (ira_memory_move_cost[mode][rclass][0] * exit_freq));
3416 }
3417 else
3418 {
1756cb66
VM
3419 aclass = ALLOCNO_CLASS (subloop_allocno);
3420 ira_init_register_move_cost_if_necessary (mode);
3421 cost = (ira_register_move_cost[mode][rclass][rclass]
058e97ec 3422 * (exit_freq + enter_freq));
cb1ca6ac 3423 ira_allocate_and_set_or_copy_costs
1756cb66
VM
3424 (&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno), aclass,
3425 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno),
cb1ca6ac
VM
3426 ALLOCNO_HARD_REG_COSTS (subloop_allocno));
3427 ira_allocate_and_set_or_copy_costs
3428 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno),
1756cb66 3429 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno));
cb1ca6ac
VM
3430 ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index] -= cost;
3431 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno)[index]
058e97ec 3432 -= cost;
1756cb66 3433 if (ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)
cb1ca6ac 3434 > ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index])
1756cb66 3435 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)
cb1ca6ac 3436 = ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index];
058e97ec
VM
3437 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
3438 += (ira_memory_move_cost[mode][rclass][0] * enter_freq
3439 + ira_memory_move_cost[mode][rclass][1] * exit_freq);
058e97ec
VM
3440 }
3441 }
3442 }
1756cb66 3443 ira_free (allocno_color_data);
bf08fb16 3444 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
1756cb66
VM
3445 {
3446 a = ira_allocnos[j];
3447 ALLOCNO_ADD_DATA (a) = NULL;
1756cb66 3448 }
058e97ec
VM
3449}
3450
 3451/* Initialize the common data for coloring and call functions to do
3452 Chaitin-Briggs and regional coloring. */
3453static void
3454do_coloring (void)
3455{
3456 coloring_allocno_bitmap = ira_allocate_bitmap ();
058e97ec
VM
3457 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3458 fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");
b8698a0f 3459
058e97ec
VM
3460 ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);
3461
3462 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
3463 ira_print_disposition (ira_dump_file);
3464
058e97ec 3465 ira_free_bitmap (coloring_allocno_bitmap);
058e97ec
VM
3466}
3467
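/* Editor's note: a bare-bones, standalone sketch (invented region type, not
   GCC's loop tree) of the top-down walk that do_coloring () requests via
   ira_traverse_loop_tree (): a parent region is processed before its
   subloops, so the parent's decisions are available when the subloops are
   colored.  color_region () stands in for color_pass ().  */
#include <stdio.h>
#include <stddef.h>

struct region
{
  const char *name;
  struct region *children;   /* first child   */
  struct region *next;       /* next sibling  */
};

static void
color_region (struct region *r)
{
  printf ("coloring region %s\n", r->name);
}

static void
traverse_top_down (struct region *r)
{
  color_region (r);                            /* parent first ...        */
  for (struct region *c = r->children; c != NULL; c = c->next)
    traverse_top_down (c);                     /* ... then each subloop   */
}

int
main (void)
{
  struct region inner = { "loop 2", NULL, NULL };
  struct region outer = { "loop 1", &inner, NULL };
  struct region root = { "function", &outer, NULL };
  traverse_top_down (&root);
  return 0;
}
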
3468\f
3469
 3470/* Move spill/restore code, which is to be generated in ira-emit.c,
 3471 to less frequent points (if it is profitable) by reassigning some
 3472 allocnos (in loops with subloops, themselves contained in another loop)
 3473 to memory, which results in a longer live range where the corresponding
 3474 pseudo-registers will be in memory. */
3475static void
3476move_spill_restore (void)
3477{
3478 int cost, regno, hard_regno, hard_regno2, index;
3479 bool changed_p;
3480 int enter_freq, exit_freq;
ef4bddc2 3481 machine_mode mode;
058e97ec
VM
3482 enum reg_class rclass;
3483 ira_allocno_t a, parent_allocno, subloop_allocno;
3484 ira_loop_tree_node_t parent, loop_node, subloop_node;
3485 ira_allocno_iterator ai;
3486
3487 for (;;)
3488 {
3489 changed_p = false;
3490 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3491 fprintf (ira_dump_file, "New iteration of spill/restore move\n");
3492 FOR_EACH_ALLOCNO (a, ai)
3493 {
3494 regno = ALLOCNO_REGNO (a);
3495 loop_node = ALLOCNO_LOOP_TREE_NODE (a);
3496 if (ALLOCNO_CAP_MEMBER (a) != NULL
3497 || ALLOCNO_CAP (a) != NULL
3498 || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0
3499 || loop_node->children == NULL
 3500 /* Don't do the optimization because it can create
 3501 copies, and the reload pass can spill an allocno set
 3502 up by a copy even though the allocno will not get a
 3503 memory slot. */
55a2c322 3504 || ira_equiv_no_lvalue_p (regno)
b81a2f0d
VM
3505 || !bitmap_bit_p (loop_node->border_allocnos, ALLOCNO_NUM (a))
3506 /* Do not spill static chain pointer pseudo when
3507 non-local goto is used. */
3508 || non_spilled_static_chain_regno_p (regno))
058e97ec
VM
3509 continue;
3510 mode = ALLOCNO_MODE (a);
1756cb66 3511 rclass = ALLOCNO_CLASS (a);
058e97ec
VM
3512 index = ira_class_hard_reg_index[rclass][hard_regno];
3513 ira_assert (index >= 0);
3514 cost = (ALLOCNO_MEMORY_COST (a)
3515 - (ALLOCNO_HARD_REG_COSTS (a) == NULL
1756cb66 3516 ? ALLOCNO_CLASS_COST (a)
058e97ec 3517 : ALLOCNO_HARD_REG_COSTS (a)[index]));
1756cb66 3518 ira_init_register_move_cost_if_necessary (mode);
058e97ec
VM
3519 for (subloop_node = loop_node->subloops;
3520 subloop_node != NULL;
3521 subloop_node = subloop_node->subloop_next)
3522 {
3523 ira_assert (subloop_node->bb == NULL);
3524 subloop_allocno = subloop_node->regno_allocno_map[regno];
3525 if (subloop_allocno == NULL)
3526 continue;
1756cb66 3527 ira_assert (rclass == ALLOCNO_CLASS (subloop_allocno));
058e97ec
VM
3528 /* We have accumulated cost. To get the real cost of
3529 allocno usage in the loop we should subtract costs of
3530 the subloop allocnos. */
3531 cost -= (ALLOCNO_MEMORY_COST (subloop_allocno)
3532 - (ALLOCNO_HARD_REG_COSTS (subloop_allocno) == NULL
1756cb66 3533 ? ALLOCNO_CLASS_COST (subloop_allocno)
058e97ec
VM
3534 : ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index]));
3535 exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
3536 enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
3537 if ((hard_regno2 = ALLOCNO_HARD_REGNO (subloop_allocno)) < 0)
3538 cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
3539 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
3540 else
3541 {
3542 cost
3543 += (ira_memory_move_cost[mode][rclass][0] * exit_freq
3544 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
3545 if (hard_regno2 != hard_regno)
1756cb66 3546 cost -= (ira_register_move_cost[mode][rclass][rclass]
058e97ec
VM
3547 * (exit_freq + enter_freq));
3548 }
3549 }
3550 if ((parent = loop_node->parent) != NULL
3551 && (parent_allocno = parent->regno_allocno_map[regno]) != NULL)
3552 {
1756cb66 3553 ira_assert (rclass == ALLOCNO_CLASS (parent_allocno));
058e97ec
VM
3554 exit_freq = ira_loop_edge_freq (loop_node, regno, true);
3555 enter_freq = ira_loop_edge_freq (loop_node, regno, false);
3556 if ((hard_regno2 = ALLOCNO_HARD_REGNO (parent_allocno)) < 0)
3557 cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
3558 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
3559 else
3560 {
3561 cost
3562 += (ira_memory_move_cost[mode][rclass][1] * exit_freq
3563 + ira_memory_move_cost[mode][rclass][0] * enter_freq);
3564 if (hard_regno2 != hard_regno)
1756cb66 3565 cost -= (ira_register_move_cost[mode][rclass][rclass]
058e97ec
VM
3566 * (exit_freq + enter_freq));
3567 }
3568 }
3569 if (cost < 0)
3570 {
3571 ALLOCNO_HARD_REGNO (a) = -1;
3572 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3573 {
3574 fprintf
3575 (ira_dump_file,
3576 " Moving spill/restore for a%dr%d up from loop %d",
2608d841 3577 ALLOCNO_NUM (a), regno, loop_node->loop_num);
058e97ec
VM
3578 fprintf (ira_dump_file, " - profit %d\n", -cost);
3579 }
3580 changed_p = true;
3581 }
3582 }
3583 if (! changed_p)
3584 break;
3585 }
3586}
3587
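/* Editor's note: a rough standalone sketch of the profitability arithmetic
   in move_spill_restore () above, under invented numbers and heavily
   simplified (the real code also accounts for subloop register benefits and
   register-register moves).  The idea: the benefit of keeping the pseudo in
   a register in this loop is weighed against the memory-move cost paid on
   every subloop entry/exit edge where the pseudo lives elsewhere; a
   negative total means moving the spill/restore code up is a win.  */
#include <stdio.h>

int
main (void)
{
  int mem_cost_here = 40;   /* cost of keeping the pseudo in memory here   */
  int reg_cost_here = 10;   /* cost of keeping it in its hard register     */
  int enter_freq = 90;      /* frequency of subloop entry edges            */
  int exit_freq = 90;       /* frequency of subloop exit edges             */
  int mem_move_cost = 2;    /* cost of one memory load or store            */

  /* Benefit of the register inside this loop ...  */
  int cost = mem_cost_here - reg_cost_here;
  /* ... minus what we pay to save/restore around a subloop that keeps the
     pseudo in memory.  */
  cost -= mem_move_cost * enter_freq + mem_move_cost * exit_freq;

  if (cost < 0)
    printf ("profit %d: move spill/restore up, keep pseudo in memory\n",
            -cost);
  else
    printf ("keep the hard register assignment (benefit %d)\n", cost);
  return 0;
}
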
3588\f
3589
3590/* Update current hard reg costs and current conflict hard reg costs
 3591 for allocno A. It is done by processing its copies that involve
 3592 other allocnos which have already been assigned. */
3593static void
3594update_curr_costs (ira_allocno_t a)
3595{
3596 int i, hard_regno, cost;
ef4bddc2 3597 machine_mode mode;
1756cb66 3598 enum reg_class aclass, rclass;
058e97ec
VM
3599 ira_allocno_t another_a;
3600 ira_copy_t cp, next_cp;
3601
bdf0eb06 3602 ira_free_allocno_updated_costs (a);
058e97ec 3603 ira_assert (! ALLOCNO_ASSIGNED_P (a));
1756cb66
VM
3604 aclass = ALLOCNO_CLASS (a);
3605 if (aclass == NO_REGS)
058e97ec
VM
3606 return;
3607 mode = ALLOCNO_MODE (a);
1756cb66 3608 ira_init_register_move_cost_if_necessary (mode);
058e97ec
VM
3609 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
3610 {
3611 if (cp->first == a)
3612 {
3613 next_cp = cp->next_first_allocno_copy;
3614 another_a = cp->second;
3615 }
3616 else if (cp->second == a)
3617 {
3618 next_cp = cp->next_second_allocno_copy;
3619 another_a = cp->first;
3620 }
3621 else
3622 gcc_unreachable ();
1756cb66 3623 if (! ira_reg_classes_intersect_p[aclass][ALLOCNO_CLASS (another_a)]
058e97ec
VM
3624 || ! ALLOCNO_ASSIGNED_P (another_a)
3625 || (hard_regno = ALLOCNO_HARD_REGNO (another_a)) < 0)
3626 continue;
3627 rclass = REGNO_REG_CLASS (hard_regno);
1756cb66 3628 i = ira_class_hard_reg_index[aclass][hard_regno];
7db7ed3c
VM
3629 if (i < 0)
3630 continue;
058e97ec 3631 cost = (cp->first == a
1756cb66
VM
3632 ? ira_register_move_cost[mode][rclass][aclass]
3633 : ira_register_move_cost[mode][aclass][rclass]);
058e97ec 3634 ira_allocate_and_set_or_copy_costs
1756cb66 3635 (&ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a),
058e97ec
VM
3636 ALLOCNO_HARD_REG_COSTS (a));
3637 ira_allocate_and_set_or_copy_costs
3638 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1756cb66 3639 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
058e97ec
VM
3640 ALLOCNO_UPDATED_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
3641 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
3642 }
3643}
3644
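/* Editor's note: a tiny standalone sketch (invented numbers, plain arrays
   instead of IRA's cost vectors) of the bias that update_curr_costs ()
   applies above: the not-yet-assigned side of a copy is steered toward the
   hard register its partner already received, by subtracting
   freq * move_cost from that register's entry in the cost vector, so the
   cheapest entry then wins.  */
#include <stdio.h>

#define N_REGS 4

int
main (void)
{
  int costs[N_REGS] = { 20, 20, 20, 20 };  /* base costs per hard register  */
  int partner_hard_regno = 2;              /* register the copy partner got */
  int copy_freq = 6;
  int move_cost = 4;

  /* Make the partner's register cheaper by the move cost we would save.  */
  costs[partner_hard_regno] -= copy_freq * move_cost;

  int best = 0;
  for (int r = 1; r < N_REGS; r++)
    if (costs[r] < costs[best])
      best = r;
  printf ("choose hard register %d (cost %d)\n", best, costs[best]);
  return 0;
}
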
058e97ec
VM
3645/* Try to assign hard registers to the unassigned allocnos and
3646 allocnos conflicting with them or conflicting with allocnos whose
3647 regno >= START_REGNO. The function is called after ira_flattening,
3648 so more allocnos (including ones created in ira-emit.c) will have a
 3649 chance to get a hard register. We use a simple assignment algorithm
3650 based on priorities. */
3651void
3652ira_reassign_conflict_allocnos (int start_regno)
3653{
3654 int i, allocnos_to_color_num;
fa86d337 3655 ira_allocno_t a;
1756cb66 3656 enum reg_class aclass;
058e97ec
VM
3657 bitmap allocnos_to_color;
3658 ira_allocno_iterator ai;
3659
3660 allocnos_to_color = ira_allocate_bitmap ();
3661 allocnos_to_color_num = 0;
3662 FOR_EACH_ALLOCNO (a, ai)
3663 {
ac0ab4f7 3664 int n = ALLOCNO_NUM_OBJECTS (a);
fa86d337 3665
058e97ec
VM
3666 if (! ALLOCNO_ASSIGNED_P (a)
3667 && ! bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (a)))
3668 {
1756cb66 3669 if (ALLOCNO_CLASS (a) != NO_REGS)
058e97ec
VM
3670 sorted_allocnos[allocnos_to_color_num++] = a;
3671 else
3672 {
3673 ALLOCNO_ASSIGNED_P (a) = true;
3674 ALLOCNO_HARD_REGNO (a) = -1;
3675 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
3676 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
3677 }
3678 bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (a));
3679 }
3680 if (ALLOCNO_REGNO (a) < start_regno
1756cb66 3681 || (aclass = ALLOCNO_CLASS (a)) == NO_REGS)
058e97ec 3682 continue;
ac0ab4f7 3683 for (i = 0; i < n; i++)
058e97ec 3684 {
ac0ab4f7
BS
3685 ira_object_t obj = ALLOCNO_OBJECT (a, i);
3686 ira_object_t conflict_obj;
3687 ira_object_conflict_iterator oci;
1756cb66 3688
ac0ab4f7
BS
3689 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
3690 {
3691 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
1756cb66 3692
ac0ab4f7 3693 ira_assert (ira_reg_classes_intersect_p
1756cb66 3694 [aclass][ALLOCNO_CLASS (conflict_a)]);
fcaa4ca4 3695 if (!bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (conflict_a)))
ac0ab4f7 3696 continue;
ac0ab4f7
BS
3697 sorted_allocnos[allocnos_to_color_num++] = conflict_a;
3698 }
058e97ec
VM
3699 }
3700 }
3701 ira_free_bitmap (allocnos_to_color);
3702 if (allocnos_to_color_num > 1)
3703 {
1ae64b0f 3704 setup_allocno_priorities (sorted_allocnos, allocnos_to_color_num);
058e97ec
VM
3705 qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t),
3706 allocno_priority_compare_func);
3707 }
3708 for (i = 0; i < allocnos_to_color_num; i++)
3709 {
3710 a = sorted_allocnos[i];
3711 ALLOCNO_ASSIGNED_P (a) = false;
058e97ec
VM
3712 update_curr_costs (a);
3713 }
3714 for (i = 0; i < allocnos_to_color_num; i++)
3715 {
3716 a = sorted_allocnos[i];
3717 if (assign_hard_reg (a, true))
3718 {
3719 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3720 fprintf
3721 (ira_dump_file,
3722 " Secondary allocation: assign hard reg %d to reg %d\n",
3723 ALLOCNO_HARD_REGNO (a), ALLOCNO_REGNO (a));
3724 }
3725 }
3726}
3727
3728\f
3729
1756cb66
VM
3730/* This page contains functions used to find conflicts using allocno
3731 live ranges. */
3732
1756cb66
VM
3733#ifdef ENABLE_IRA_CHECKING
3734
3735/* Return TRUE if live ranges of pseudo-registers REGNO1 and REGNO2
3736 intersect. This should be used when there is only one region.
3737 Currently this is used during reload. */
3738static bool
3739conflict_by_live_ranges_p (int regno1, int regno2)
3740{
3741 ira_allocno_t a1, a2;
3742
3743 ira_assert (regno1 >= FIRST_PSEUDO_REGISTER
3744 && regno2 >= FIRST_PSEUDO_REGISTER);
df3e3493 3745 /* Reg info calculated by dataflow infrastructure can be different
1756cb66
VM
 3746 from the one calculated by regclass. */
3747 if ((a1 = ira_loop_tree_root->regno_allocno_map[regno1]) == NULL
3748 || (a2 = ira_loop_tree_root->regno_allocno_map[regno2]) == NULL)
3749 return false;
3750 return allocnos_conflict_by_live_ranges_p (a1, a2);
3751}
3752
3753#endif
3754
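/* Editor's note: a standalone sketch (invented range type; IRA uses its own
   live_range_t lists) of the live-range conflict test used throughout this
   page: two live ranges conflict exactly when their sorted lists of
   disjoint [start, finish] intervals share a program point, which a single
   linear merge answers.  Here the lists are assumed sorted by increasing
   start.  */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct range { int start; int finish; struct range *next; };

static bool
ranges_intersect_p (const struct range *r1, const struct range *r2)
{
  /* Advance the list whose current interval finishes first; stop as soon
     as the current intervals overlap.  */
  while (r1 != NULL && r2 != NULL)
    {
      if (r1->finish < r2->start)
        r1 = r1->next;
      else if (r2->finish < r1->start)
        r2 = r2->next;
      else
        return true;
    }
  return false;
}

int
main (void)
{
  struct range a2 = { 30, 40, NULL }, a1 = { 5, 12, &a2 };
  struct range b2 = { 35, 38, NULL }, b1 = { 13, 20, &b2 };

  printf ("conflict: %s\n", ranges_intersect_p (&a1, &b1) ? "yes" : "no");
  return 0;
}
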
3755\f
3756
058e97ec
VM
3757/* This page contains code to coalesce memory stack slots used by
 3758 spilled allocnos. This results in a smaller stack frame, better data
 3759 locality, and smaller code for some architectures like
3760 x86/x86_64 where insn size depends on address displacement value.
3761 On the other hand, it can worsen insn scheduling after the RA but
3762 in practice it is less important than smaller stack frames. */
3763
22b0982c
VM
3764/* TRUE if we coalesced some allocnos. In other words, if we got
3765 loops formed by members first_coalesced_allocno and
 3766 next_coalesced_allocno containing more than one allocno. */
3767static bool allocno_coalesced_p;
3768
 3769/* Bitmap used to prevent repeated allocno processing because of
3770 coalescing. */
3771static bitmap processed_coalesced_allocno_bitmap;
3772
1756cb66
VM
3773/* See below. */
3774typedef struct coalesce_data *coalesce_data_t;
3775
 3776/* To decrease the footprint of the ira_allocno structure we store all data
3777 needed only for coalescing in the following structure. */
3778struct coalesce_data
3779{
3780 /* Coalesced allocnos form a cyclic list. One allocno given by
3781 FIRST represents all coalesced allocnos. The
3782 list is chained by NEXT. */
3783 ira_allocno_t first;
3784 ira_allocno_t next;
3785 int temp;
3786};
3787
3788/* Container for storing allocno data concerning coalescing. */
3789static coalesce_data_t allocno_coalesce_data;
3790
3791/* Macro to access the data concerning coalescing. */
3792#define ALLOCNO_COALESCE_DATA(a) ((coalesce_data_t) ALLOCNO_ADD_DATA (a))
3793
22b0982c
VM
3794/* Merge two sets of coalesced allocnos given correspondingly by
 3795 allocnos A1 and A2 (more accurately, merging the A2 set into the
 3796 A1 set). */
3797static void
3798merge_allocnos (ira_allocno_t a1, ira_allocno_t a2)
3799{
3800 ira_allocno_t a, first, last, next;
3801
1756cb66
VM
3802 first = ALLOCNO_COALESCE_DATA (a1)->first;
3803 a = ALLOCNO_COALESCE_DATA (a2)->first;
3804 if (first == a)
22b0982c 3805 return;
1756cb66
VM
3806 for (last = a2, a = ALLOCNO_COALESCE_DATA (a2)->next;;
3807 a = ALLOCNO_COALESCE_DATA (a)->next)
22b0982c 3808 {
1756cb66 3809 ALLOCNO_COALESCE_DATA (a)->first = first;
22b0982c
VM
3810 if (a == a2)
3811 break;
3812 last = a;
3813 }
1756cb66
VM
3814 next = allocno_coalesce_data[ALLOCNO_NUM (first)].next;
3815 allocno_coalesce_data[ALLOCNO_NUM (first)].next = a2;
3816 allocno_coalesce_data[ALLOCNO_NUM (last)].next = next;
22b0982c
VM
3817}
3818
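/* Editor's note: a standalone sketch (invented node type, array indices
   instead of allocno pointers) of the coalesced-set representation that
   merge_allocnos () above manipulates: each set is a circular list whose
   members all point at a representative "first"; merging relabels the
   members of the second set and splices the two rings together.  */
#include <stdio.h>

#define MAX 8

struct node { int first; int next; };   /* indices into the node array  */

static struct node nodes[MAX];

static void
make_singleton (int i)
{
  nodes[i].first = i;   /* each node starts as its own representative  */
  nodes[i].next = i;    /* ... in a one-element ring                   */
}

static void
merge_sets (int a1, int a2)
{
  int first = nodes[a1].first;
  if (nodes[a2].first == first)
    return;                       /* already in the same set  */

  /* Relabel every member of A2's ring and remember its last element.  */
  int last = a2, a;
  for (a = nodes[a2].next; ; a = nodes[a].next)
    {
      nodes[a].first = first;
      if (a == a2)
        break;
      last = a;
    }
  /* Splice A2's ring into A1's ring right after FIRST.  */
  int next = nodes[first].next;
  nodes[first].next = a2;
  nodes[last].next = next;
}

int
main (void)
{
  for (int i = 0; i < MAX; i++)
    make_singleton (i);
  merge_sets (1, 2);
  merge_sets (3, 4);
  merge_sets (1, 3);

  /* Walk the ring of the set containing node 1.  */
  printf ("set of 1:");
  int start = nodes[1].first;
  for (int a = nodes[start].next; ; a = nodes[a].next)
    {
      printf (" %d", a);
      if (a == start)
        break;
    }
  printf ("\n");
  return 0;
}
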
1756cb66
VM
3819/* Return TRUE if there are conflicting allocnos from two sets of
3820 coalesced allocnos given correspondingly by allocnos A1 and A2. We
3821 use live ranges to find conflicts because conflicts are represented
3822 only for allocnos of the same allocno class and during the reload
3823 pass we coalesce allocnos for sharing stack memory slots. */
22b0982c
VM
3824static bool
3825coalesced_allocno_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
3826{
1756cb66 3827 ira_allocno_t a, conflict_a;
22b0982c 3828
22b0982c
VM
3829 if (allocno_coalesced_p)
3830 {
1756cb66
VM
3831 bitmap_clear (processed_coalesced_allocno_bitmap);
3832 for (a = ALLOCNO_COALESCE_DATA (a1)->next;;
3833 a = ALLOCNO_COALESCE_DATA (a)->next)
22b0982c 3834 {
1756cb66 3835 bitmap_set_bit (processed_coalesced_allocno_bitmap, ALLOCNO_NUM (a));
22b0982c
VM
3836 if (a == a1)
3837 break;
3838 }
3839 }
1756cb66
VM
3840 for (a = ALLOCNO_COALESCE_DATA (a2)->next;;
3841 a = ALLOCNO_COALESCE_DATA (a)->next)
22b0982c 3842 {
1756cb66
VM
3843 for (conflict_a = ALLOCNO_COALESCE_DATA (a1)->next;;
3844 conflict_a = ALLOCNO_COALESCE_DATA (conflict_a)->next)
22b0982c 3845 {
1756cb66 3846 if (allocnos_conflict_by_live_ranges_p (a, conflict_a))
22b0982c 3847 return true;
1756cb66 3848 if (conflict_a == a1)
22b0982c
VM
3849 break;
3850 }
22b0982c
VM
3851 if (a == a2)
3852 break;
3853 }
3854 return false;
3855}
3856
3857/* The major function for aggressive allocno coalescing. We coalesce
3858 only spilled allocnos. If some allocnos have been coalesced, we
3859 set up flag allocno_coalesced_p. */
3860static void
3861coalesce_allocnos (void)
3862{
3863 ira_allocno_t a;
bf08fb16 3864 ira_copy_t cp, next_cp;
22b0982c
VM
3865 unsigned int j;
3866 int i, n, cp_num, regno;
3867 bitmap_iterator bi;
3868
22b0982c
VM
3869 cp_num = 0;
3870 /* Collect copies. */
3871 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)
3872 {
3873 a = ira_allocnos[j];
3874 regno = ALLOCNO_REGNO (a);
3875 if (! ALLOCNO_ASSIGNED_P (a) || ALLOCNO_HARD_REGNO (a) >= 0
55a2c322 3876 || ira_equiv_no_lvalue_p (regno))
22b0982c
VM
3877 continue;
3878 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
3879 {
3880 if (cp->first == a)
3881 {
3882 next_cp = cp->next_first_allocno_copy;
3883 regno = ALLOCNO_REGNO (cp->second);
3884 /* For priority coloring we coalesce allocnos only with
1756cb66 3885 the same allocno class, not with intersecting allocno
22b0982c
VM
 3886 classes as would be possible. It is done for
3887 simplicity. */
3888 if ((cp->insn != NULL || cp->constraint_p)
3889 && ALLOCNO_ASSIGNED_P (cp->second)
3890 && ALLOCNO_HARD_REGNO (cp->second) < 0
55a2c322 3891 && ! ira_equiv_no_lvalue_p (regno))
22b0982c
VM
3892 sorted_copies[cp_num++] = cp;
3893 }
3894 else if (cp->second == a)
3895 next_cp = cp->next_second_allocno_copy;
3896 else
3897 gcc_unreachable ();
3898 }
3899 }
3900 qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
3901 /* Coalesced copies, most frequently executed first. */
3902 for (; cp_num != 0;)
3903 {
3904 for (i = 0; i < cp_num; i++)
3905 {
3906 cp = sorted_copies[i];
3907 if (! coalesced_allocno_conflict_p (cp->first, cp->second))
3908 {
3909 allocno_coalesced_p = true;
3910 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3911 fprintf
3912 (ira_dump_file,
3913 " Coalescing copy %d:a%dr%d-a%dr%d (freq=%d)\n",
3914 cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
3915 ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
3916 cp->freq);
3917 merge_allocnos (cp->first, cp->second);
3918 i++;
3919 break;
3920 }
3921 }
3922 /* Collect the rest of copies. */
3923 for (n = 0; i < cp_num; i++)
3924 {
3925 cp = sorted_copies[i];
1756cb66
VM
3926 if (allocno_coalesce_data[ALLOCNO_NUM (cp->first)].first
3927 != allocno_coalesce_data[ALLOCNO_NUM (cp->second)].first)
22b0982c
VM
3928 sorted_copies[n++] = cp;
3929 }
3930 cp_num = n;
3931 }
22b0982c
VM
3932}
3933
058e97ec
VM
 3934/* Usage cost and order number of the coalesced allocno set to which
 3935 a given pseudo register belongs. */
3936static int *regno_coalesced_allocno_cost;
3937static int *regno_coalesced_allocno_num;
3938
 3939/* Sort pseudos according to the frequencies of the coalesced allocno
 3940 sets they belong to (putting the most frequent ones first), and then
 3941 according to the coalesced allocno set order numbers. */
3942static int
3943coalesced_pseudo_reg_freq_compare (const void *v1p, const void *v2p)
3944{
3945 const int regno1 = *(const int *) v1p;
3946 const int regno2 = *(const int *) v2p;
3947 int diff;
3948
3949 if ((diff = (regno_coalesced_allocno_cost[regno2]
3950 - regno_coalesced_allocno_cost[regno1])) != 0)
3951 return diff;
3952 if ((diff = (regno_coalesced_allocno_num[regno1]
3953 - regno_coalesced_allocno_num[regno2])) != 0)
3954 return diff;
3955 return regno1 - regno2;
3956}
3957
3958/* Widest width in which each pseudo reg is referred to (via subreg).
3959 It is used for sorting pseudo registers. */
bd5a2c67 3960static machine_mode *regno_max_ref_mode;
058e97ec 3961
058e97ec
VM
 3962/* Sort pseudos according to their slot numbers (putting ones with
3963 smaller numbers first, or last when the frame pointer is not
3964 needed). */
3965static int
3966coalesced_pseudo_reg_slot_compare (const void *v1p, const void *v2p)
3967{
3968 const int regno1 = *(const int *) v1p;
3969 const int regno2 = *(const int *) v2p;
3970 ira_allocno_t a1 = ira_regno_allocno_map[regno1];
3971 ira_allocno_t a2 = ira_regno_allocno_map[regno2];
3972 int diff, slot_num1, slot_num2;
bd5a2c67 3973 machine_mode mode1, mode2;
058e97ec
VM
3974
3975 if (a1 == NULL || ALLOCNO_HARD_REGNO (a1) >= 0)
3976 {
3977 if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
004a6ce8 3978 return regno1 - regno2;
058e97ec
VM
3979 return 1;
3980 }
3981 else if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
3982 return -1;
3983 slot_num1 = -ALLOCNO_HARD_REGNO (a1);
3984 slot_num2 = -ALLOCNO_HARD_REGNO (a2);
3985 if ((diff = slot_num1 - slot_num2) != 0)
3986 return (frame_pointer_needed
e0bf0dc2 3987 || (!FRAME_GROWS_DOWNWARD) == STACK_GROWS_DOWNWARD ? diff : -diff);
bd5a2c67
RS
3988 mode1 = wider_subreg_mode (PSEUDO_REGNO_MODE (regno1),
3989 regno_max_ref_mode[regno1]);
3990 mode2 = wider_subreg_mode (PSEUDO_REGNO_MODE (regno2),
3991 regno_max_ref_mode[regno2]);
cf098191
RS
3992 if ((diff = compare_sizes_for_sort (GET_MODE_SIZE (mode2),
3993 GET_MODE_SIZE (mode1))) != 0)
058e97ec 3994 return diff;
004a6ce8 3995 return regno1 - regno2;
058e97ec
VM
3996}
3997
3998/* Setup REGNO_COALESCED_ALLOCNO_COST and REGNO_COALESCED_ALLOCNO_NUM
3999 for coalesced allocno sets containing allocnos with their regnos
4000 given in array PSEUDO_REGNOS of length N. */
4001static void
4002setup_coalesced_allocno_costs_and_nums (int *pseudo_regnos, int n)
4003{
4004 int i, num, regno, cost;
4005 ira_allocno_t allocno, a;
4006
4007 for (num = i = 0; i < n; i++)
4008 {
4009 regno = pseudo_regnos[i];
4010 allocno = ira_regno_allocno_map[regno];
4011 if (allocno == NULL)
4012 {
4013 regno_coalesced_allocno_cost[regno] = 0;
4014 regno_coalesced_allocno_num[regno] = ++num;
4015 continue;
4016 }
1756cb66 4017 if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno)
058e97ec
VM
4018 continue;
4019 num++;
1756cb66
VM
4020 for (cost = 0, a = ALLOCNO_COALESCE_DATA (allocno)->next;;
4021 a = ALLOCNO_COALESCE_DATA (a)->next)
058e97ec
VM
4022 {
4023 cost += ALLOCNO_FREQ (a);
4024 if (a == allocno)
4025 break;
4026 }
1756cb66
VM
4027 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
4028 a = ALLOCNO_COALESCE_DATA (a)->next)
058e97ec
VM
4029 {
4030 regno_coalesced_allocno_num[ALLOCNO_REGNO (a)] = num;
4031 regno_coalesced_allocno_cost[ALLOCNO_REGNO (a)] = cost;
4032 if (a == allocno)
4033 break;
4034 }
4035 }
4036}
4037
4038/* Collect spilled allocnos representing coalesced allocno sets (the
4039 first coalesced allocno). The collected allocnos are returned
4040 through array SPILLED_COALESCED_ALLOCNOS. The function returns the
4041 number of the collected allocnos. The allocnos are given by their
4042 regnos in array PSEUDO_REGNOS of length N. */
4043static int
4044collect_spilled_coalesced_allocnos (int *pseudo_regnos, int n,
4045 ira_allocno_t *spilled_coalesced_allocnos)
4046{
4047 int i, num, regno;
4048 ira_allocno_t allocno;
4049
4050 for (num = i = 0; i < n; i++)
4051 {
4052 regno = pseudo_regnos[i];
4053 allocno = ira_regno_allocno_map[regno];
4054 if (allocno == NULL || ALLOCNO_HARD_REGNO (allocno) >= 0
1756cb66 4055 || ALLOCNO_COALESCE_DATA (allocno)->first != allocno)
058e97ec
VM
4056 continue;
4057 spilled_coalesced_allocnos[num++] = allocno;
4058 }
4059 return num;
4060}
4061
3553f0bb
VM
 4062/* Array of live ranges of size IRA_ALLOCNOS_NUM. The live range for
 4063 a given slot contains the live ranges of the coalesced allocnos
 4064 assigned to that slot. */
b14151b5 4065static live_range_t *slot_coalesced_allocnos_live_ranges;
b15a7ae6 4066
3553f0bb
VM
 4067/* Return TRUE if the coalesced allocnos represented by ALLOCNO have
 4068 live ranges that intersect the live ranges of the coalesced allocnos
 4069 assigned to the slot with number N. */
b15a7ae6 4070static bool
3553f0bb 4071slot_coalesced_allocno_live_ranges_intersect_p (ira_allocno_t allocno, int n)
b15a7ae6 4072{
b15a7ae6 4073 ira_allocno_t a;
b15a7ae6 4074
1756cb66
VM
4075 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
4076 a = ALLOCNO_COALESCE_DATA (a)->next)
b15a7ae6 4077 {
ac0ab4f7
BS
4078 int i;
4079 int nr = ALLOCNO_NUM_OBJECTS (a);
0550a77b 4080 gcc_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
ac0ab4f7
BS
4081 for (i = 0; i < nr; i++)
4082 {
4083 ira_object_t obj = ALLOCNO_OBJECT (a, i);
1756cb66
VM
4084
4085 if (ira_live_ranges_intersect_p
4086 (slot_coalesced_allocnos_live_ranges[n],
4087 OBJECT_LIVE_RANGES (obj)))
ac0ab4f7
BS
4088 return true;
4089 }
b15a7ae6
VM
4090 if (a == allocno)
4091 break;
4092 }
4093 return false;
4094}
4095
3553f0bb
VM
 4096/* Update the live ranges of the slot to which the coalesced
 4097 allocnos represented by ALLOCNO were assigned. */
b15a7ae6 4098static void
3553f0bb 4099setup_slot_coalesced_allocno_live_ranges (ira_allocno_t allocno)
b15a7ae6 4100{
ac0ab4f7 4101 int i, n;
b15a7ae6 4102 ira_allocno_t a;
b14151b5 4103 live_range_t r;
b15a7ae6 4104
1756cb66
VM
4105 n = ALLOCNO_COALESCE_DATA (allocno)->temp;
4106 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
4107 a = ALLOCNO_COALESCE_DATA (a)->next)
b15a7ae6 4108 {
ac0ab4f7 4109 int nr = ALLOCNO_NUM_OBJECTS (a);
0550a77b 4110 gcc_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
ac0ab4f7
BS
4111 for (i = 0; i < nr; i++)
4112 {
4113 ira_object_t obj = ALLOCNO_OBJECT (a, i);
1756cb66 4114
ac0ab4f7
BS
4115 r = ira_copy_live_range_list (OBJECT_LIVE_RANGES (obj));
4116 slot_coalesced_allocnos_live_ranges[n]
4117 = ira_merge_live_ranges
1756cb66 4118 (slot_coalesced_allocnos_live_ranges[n], r);
ac0ab4f7 4119 }
b15a7ae6
VM
4120 if (a == allocno)
4121 break;
4122 }
4123}
4124
058e97ec
VM
 4125/* We have coalesced allocnos involved in copies. Coalesce allocnos
4126 further in order to share the same memory stack slot. Allocnos
4127 representing sets of allocnos coalesced before the call are given
4128 in array SPILLED_COALESCED_ALLOCNOS of length NUM. Return TRUE if
4129 some allocnos were coalesced in the function. */
4130static bool
4131coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
4132{
3553f0bb 4133 int i, j, n, last_coalesced_allocno_num;
058e97ec
VM
4134 ira_allocno_t allocno, a;
4135 bool merged_p = false;
1240d76e 4136 bitmap set_jump_crosses = regstat_get_setjmp_crosses ();
058e97ec 4137
3553f0bb 4138 slot_coalesced_allocnos_live_ranges
b14151b5 4139 = (live_range_t *) ira_allocate (sizeof (live_range_t) * ira_allocnos_num);
3553f0bb 4140 memset (slot_coalesced_allocnos_live_ranges, 0,
b14151b5 4141 sizeof (live_range_t) * ira_allocnos_num);
b15a7ae6 4142 last_coalesced_allocno_num = 0;
058e97ec
VM
4143 /* Coalesce non-conflicting spilled allocnos preferring most
4144 frequently used. */
4145 for (i = 0; i < num; i++)
4146 {
4147 allocno = spilled_coalesced_allocnos[i];
1756cb66 4148 if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno
1240d76e 4149 || bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (allocno))
55a2c322 4150 || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno)))
058e97ec
VM
4151 continue;
4152 for (j = 0; j < i; j++)
4153 {
4154 a = spilled_coalesced_allocnos[j];
1756cb66
VM
4155 n = ALLOCNO_COALESCE_DATA (a)->temp;
4156 if (ALLOCNO_COALESCE_DATA (a)->first == a
1240d76e 4157 && ! bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (a))
55a2c322 4158 && ! ira_equiv_no_lvalue_p (ALLOCNO_REGNO (a))
3553f0bb 4159 && ! slot_coalesced_allocno_live_ranges_intersect_p (allocno, n))
b15a7ae6
VM
4160 break;
4161 }
4162 if (j >= i)
4163 {
4164 /* No coalescing: set up number for coalesced allocnos
4165 represented by ALLOCNO. */
1756cb66 4166 ALLOCNO_COALESCE_DATA (allocno)->temp = last_coalesced_allocno_num++;
3553f0bb 4167 setup_slot_coalesced_allocno_live_ranges (allocno);
b15a7ae6
VM
4168 }
4169 else
4170 {
058e97ec
VM
4171 allocno_coalesced_p = true;
4172 merged_p = true;
4173 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4174 fprintf (ira_dump_file,
4175 " Coalescing spilled allocnos a%dr%d->a%dr%d\n",
4176 ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno),
4177 ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
1756cb66
VM
4178 ALLOCNO_COALESCE_DATA (allocno)->temp
4179 = ALLOCNO_COALESCE_DATA (a)->temp;
3553f0bb 4180 setup_slot_coalesced_allocno_live_ranges (allocno);
058e97ec 4181 merge_allocnos (a, allocno);
1756cb66 4182 ira_assert (ALLOCNO_COALESCE_DATA (a)->first == a);
058e97ec
VM
4183 }
4184 }
3553f0bb 4185 for (i = 0; i < ira_allocnos_num; i++)
9140d27b 4186 ira_finish_live_range_list (slot_coalesced_allocnos_live_ranges[i]);
3553f0bb 4187 ira_free (slot_coalesced_allocnos_live_ranges);
058e97ec
VM
4188 return merged_p;
4189}
4190
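/* Editor's note: a standalone sketch (invented interval data, not IRA's
   allocno live ranges) of the first-fit packing idea behind
   coalesce_spill_slots () above: walk the spilled objects in priority order
   and put each one into the first stack slot whose accumulated live ranges
   it does not intersect, opening a new slot otherwise.  */
#include <stdio.h>
#include <stdbool.h>

#define N_OBJS 4
#define MAX_SLOTS N_OBJS

struct interval { int start, finish; };

/* One live interval per spilled object, already in priority order.  */
static const struct interval objs[N_OBJS] =
  { { 0, 10 }, { 12, 20 }, { 5, 15 }, { 21, 30 } };

int
main (void)
{
  struct interval slot_ranges[MAX_SLOTS][N_OBJS];
  int slot_count[MAX_SLOTS] = { 0 };
  int n_slots = 0;

  for (int i = 0; i < N_OBJS; i++)
    {
      int s;
      for (s = 0; s < n_slots; s++)
        {
          bool conflict = false;
          for (int k = 0; k < slot_count[s]; k++)
            if (objs[i].start <= slot_ranges[s][k].finish
                && slot_ranges[s][k].start <= objs[i].finish)
              {
                conflict = true;
                break;
              }
          if (!conflict)
            break;              /* object I fits into slot S  */
        }
      if (s == n_slots)
        n_slots++;              /* no existing slot fits: open a new one  */
      slot_ranges[s][slot_count[s]++] = objs[i];
      printf ("object %d [%d,%d] -> slot %d\n",
              i, objs[i].start, objs[i].finish, s);
    }
  printf ("%d stack slots used for %d spilled objects\n", n_slots, N_OBJS);
  return 0;
}
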
4191/* Sort pseudo-register numbers in array PSEUDO_REGNOS of length N for
 4192 subsequent assignment of stack slots to them in the reload pass. To do
4193 this we coalesce spilled allocnos first to decrease the number of
4194 memory-memory move insns. This function is called by the
4195 reload. */
4196void
4197ira_sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
bd5a2c67 4198 machine_mode *reg_max_ref_mode)
058e97ec
VM
4199{
4200 int max_regno = max_reg_num ();
4201 int i, regno, num, slot_num;
4202 ira_allocno_t allocno, a;
4203 ira_allocno_iterator ai;
4204 ira_allocno_t *spilled_coalesced_allocnos;
4205
9994ad20
KC
4206 ira_assert (! ira_use_lra_p);
4207
058e97ec
VM
4208 /* Set up allocnos can be coalesced. */
4209 coloring_allocno_bitmap = ira_allocate_bitmap ();
4210 for (i = 0; i < n; i++)
4211 {
4212 regno = pseudo_regnos[i];
4213 allocno = ira_regno_allocno_map[regno];
4214 if (allocno != NULL)
1756cb66 4215 bitmap_set_bit (coloring_allocno_bitmap, ALLOCNO_NUM (allocno));
058e97ec
VM
4216 }
4217 allocno_coalesced_p = false;
22b0982c 4218 processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
1756cb66
VM
4219 allocno_coalesce_data
4220 = (coalesce_data_t) ira_allocate (sizeof (struct coalesce_data)
4221 * ira_allocnos_num);
4222 /* Initialize coalesce data for allocnos. */
4223 FOR_EACH_ALLOCNO (a, ai)
4224 {
4225 ALLOCNO_ADD_DATA (a) = allocno_coalesce_data + ALLOCNO_NUM (a);
4226 ALLOCNO_COALESCE_DATA (a)->first = a;
4227 ALLOCNO_COALESCE_DATA (a)->next = a;
4228 }
22b0982c 4229 coalesce_allocnos ();
058e97ec
VM
4230 ira_free_bitmap (coloring_allocno_bitmap);
4231 regno_coalesced_allocno_cost
4232 = (int *) ira_allocate (max_regno * sizeof (int));
4233 regno_coalesced_allocno_num
4234 = (int *) ira_allocate (max_regno * sizeof (int));
4235 memset (regno_coalesced_allocno_num, 0, max_regno * sizeof (int));
4236 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
 4237 /* Sort regnos according to the frequencies of the corresponding coalesced
4238 allocno sets. */
4239 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare);
4240 spilled_coalesced_allocnos
4241 = (ira_allocno_t *) ira_allocate (ira_allocnos_num
4242 * sizeof (ira_allocno_t));
4243 /* Collect allocnos representing the spilled coalesced allocno
4244 sets. */
4245 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
4246 spilled_coalesced_allocnos);
4247 if (flag_ira_share_spill_slots
4248 && coalesce_spill_slots (spilled_coalesced_allocnos, num))
4249 {
4250 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
4251 qsort (pseudo_regnos, n, sizeof (int),
4252 coalesced_pseudo_reg_freq_compare);
4253 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
4254 spilled_coalesced_allocnos);
4255 }
4256 ira_free_bitmap (processed_coalesced_allocno_bitmap);
4257 allocno_coalesced_p = false;
 4258 /* Assign stack slot numbers to spilled allocno sets, using smaller
4259 numbers for most frequently used coalesced allocnos. -1 is
4260 reserved for dynamic search of stack slots for pseudos spilled by
4261 the reload. */
4262 slot_num = 1;
4263 for (i = 0; i < num; i++)
4264 {
4265 allocno = spilled_coalesced_allocnos[i];
1756cb66 4266 if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno
058e97ec 4267 || ALLOCNO_HARD_REGNO (allocno) >= 0
55a2c322 4268 || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno)))
058e97ec
VM
4269 continue;
4270 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4271 fprintf (ira_dump_file, " Slot %d (freq,size):", slot_num);
4272 slot_num++;
1756cb66
VM
4273 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
4274 a = ALLOCNO_COALESCE_DATA (a)->next)
058e97ec
VM
4275 {
4276 ira_assert (ALLOCNO_HARD_REGNO (a) < 0);
4277 ALLOCNO_HARD_REGNO (a) = -slot_num;
4278 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
bd5a2c67
RS
4279 {
4280 machine_mode mode = wider_subreg_mode
4281 (PSEUDO_REGNO_MODE (ALLOCNO_REGNO (a)),
4282 reg_max_ref_mode[ALLOCNO_REGNO (a)]);
cf098191
RS
4283 fprintf (ira_dump_file, " a%dr%d(%d,",
4284 ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a));
4285 print_dec (GET_MODE_SIZE (mode), ira_dump_file, SIGNED);
4286 fprintf (ira_dump_file, ")\n");
bd5a2c67 4287 }
b8698a0f 4288
058e97ec
VM
4289 if (a == allocno)
4290 break;
4291 }
4292 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4293 fprintf (ira_dump_file, "\n");
4294 }
4295 ira_spilled_reg_stack_slots_num = slot_num - 1;
4296 ira_free (spilled_coalesced_allocnos);
 4297 /* Sort regnos according to the slot numbers. */
bd5a2c67 4298 regno_max_ref_mode = reg_max_ref_mode;
058e97ec 4299 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_slot_compare);
058e97ec 4300 FOR_EACH_ALLOCNO (a, ai)
1756cb66
VM
4301 ALLOCNO_ADD_DATA (a) = NULL;
4302 ira_free (allocno_coalesce_data);
058e97ec
VM
4303 ira_free (regno_coalesced_allocno_num);
4304 ira_free (regno_coalesced_allocno_cost);
4305}
4306
4307\f
4308
4309/* This page contains code used by the reload pass to improve the
4310 final code. */
4311
4312/* The function is called from reload to mark changes in the
4313 allocation of REGNO made by the reload. Remember that reg_renumber
4314 reflects the change result. */
4315void
4316ira_mark_allocation_change (int regno)
4317{
4318 ira_allocno_t a = ira_regno_allocno_map[regno];
4319 int old_hard_regno, hard_regno, cost;
1756cb66 4320 enum reg_class aclass = ALLOCNO_CLASS (a);
058e97ec
VM
4321
4322 ira_assert (a != NULL);
4323 hard_regno = reg_renumber[regno];
4324 if ((old_hard_regno = ALLOCNO_HARD_REGNO (a)) == hard_regno)
4325 return;
4326 if (old_hard_regno < 0)
4327 cost = -ALLOCNO_MEMORY_COST (a);
4328 else
4329 {
1756cb66 4330 ira_assert (ira_class_hard_reg_index[aclass][old_hard_regno] >= 0);
058e97ec 4331 cost = -(ALLOCNO_HARD_REG_COSTS (a) == NULL
1756cb66 4332 ? ALLOCNO_CLASS_COST (a)
058e97ec 4333 : ALLOCNO_HARD_REG_COSTS (a)
1756cb66 4334 [ira_class_hard_reg_index[aclass][old_hard_regno]]);
c73ccc80 4335 update_costs_from_copies (a, false, false);
058e97ec
VM
4336 }
4337 ira_overall_cost -= cost;
4338 ALLOCNO_HARD_REGNO (a) = hard_regno;
4339 if (hard_regno < 0)
4340 {
4341 ALLOCNO_HARD_REGNO (a) = -1;
4342 cost += ALLOCNO_MEMORY_COST (a);
4343 }
1756cb66 4344 else if (ira_class_hard_reg_index[aclass][hard_regno] >= 0)
058e97ec
VM
4345 {
4346 cost += (ALLOCNO_HARD_REG_COSTS (a) == NULL
1756cb66 4347 ? ALLOCNO_CLASS_COST (a)
058e97ec 4348 : ALLOCNO_HARD_REG_COSTS (a)
1756cb66 4349 [ira_class_hard_reg_index[aclass][hard_regno]]);
c73ccc80 4350 update_costs_from_copies (a, true, false);
058e97ec
VM
4351 }
4352 else
4353 /* Reload changed class of the allocno. */
4354 cost = 0;
4355 ira_overall_cost += cost;
4356}
4357
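/* Editor's note: a trivial standalone sketch (invented figures) of the
   incremental bookkeeping that ira_mark_allocation_change () performs
   above: when reload changes one decision, the running overall cost is kept
   consistent by removing the allocno's old contribution and adding the new
   one, rather than recomputing the total from scratch.  */
#include <stdio.h>

int
main (void)
{
  int overall_cost = 100;
  int memory_cost = 30;   /* cost if the pseudo lives in memory           */
  int reg_cost = 8;       /* cost if it lives in its old hard register    */

  /* The pseudo was in a register and reload just spilled it to memory.  */
  overall_cost -= reg_cost;      /* drop the old contribution  */
  overall_cost += memory_cost;   /* add the new one            */

  printf ("overall allocation cost is now %d\n", overall_cost);
  return 0;
}
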
 4358/* This function is called when reload deletes a memory-memory move. In
 4359 this case we mark that the allocation of the corresponding
 4360 allocnos should not be changed in the future. Otherwise we risk
 4361 generating wrong code. */
4362void
4363ira_mark_memory_move_deletion (int dst_regno, int src_regno)
4364{
4365 ira_allocno_t dst = ira_regno_allocno_map[dst_regno];
4366 ira_allocno_t src = ira_regno_allocno_map[src_regno];
4367
4368 ira_assert (dst != NULL && src != NULL
4369 && ALLOCNO_HARD_REGNO (dst) < 0
4370 && ALLOCNO_HARD_REGNO (src) < 0);
4371 ALLOCNO_DONT_REASSIGN_P (dst) = true;
4372 ALLOCNO_DONT_REASSIGN_P (src) = true;
4373}
4374
4375/* Try to assign a hard register (except for FORBIDDEN_REGS) to
3631be48 4376 allocno A and return TRUE in the case of success. */
058e97ec
VM
4377static bool
4378allocno_reload_assign (ira_allocno_t a, HARD_REG_SET forbidden_regs)
4379{
4380 int hard_regno;
1756cb66 4381 enum reg_class aclass;
058e97ec 4382 int regno = ALLOCNO_REGNO (a);
ac0ab4f7
BS
4383 HARD_REG_SET saved[2];
4384 int i, n;
058e97ec 4385
ac0ab4f7
BS
4386 n = ALLOCNO_NUM_OBJECTS (a);
4387 for (i = 0; i < n; i++)
4388 {
4389 ira_object_t obj = ALLOCNO_OBJECT (a, i);
4390 COPY_HARD_REG_SET (saved[i], OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
4391 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), forbidden_regs);
4392 if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
4393 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
4394 call_used_reg_set);
4395 }
058e97ec 4396 ALLOCNO_ASSIGNED_P (a) = false;
1756cb66 4397 aclass = ALLOCNO_CLASS (a);
058e97ec
VM
4398 update_curr_costs (a);
4399 assign_hard_reg (a, true);
4400 hard_regno = ALLOCNO_HARD_REGNO (a);
4401 reg_renumber[regno] = hard_regno;
4402 if (hard_regno < 0)
4403 ALLOCNO_HARD_REGNO (a) = -1;
4404 else
4405 {
1756cb66
VM
4406 ira_assert (ira_class_hard_reg_index[aclass][hard_regno] >= 0);
4407 ira_overall_cost
4408 -= (ALLOCNO_MEMORY_COST (a)
4409 - (ALLOCNO_HARD_REG_COSTS (a) == NULL
4410 ? ALLOCNO_CLASS_COST (a)
4411 : ALLOCNO_HARD_REG_COSTS (a)[ira_class_hard_reg_index
4412 [aclass][hard_regno]]));
058e97ec 4413 if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
9181a6e5
VM
4414 && ira_hard_reg_set_intersection_p (hard_regno, ALLOCNO_MODE (a),
4415 call_used_reg_set))
058e97ec
VM
4416 {
4417 ira_assert (flag_caller_saves);
4418 caller_save_needed = 1;
4419 }
4420 }
4421
4422 /* If we found a hard register, modify the RTL for the pseudo
4423 register to show the hard register, and mark the pseudo register
4424 live. */
4425 if (reg_renumber[regno] >= 0)
4426 {
4427 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4428 fprintf (ira_dump_file, ": reassign to %d\n", reg_renumber[regno]);
4429 SET_REGNO (regno_reg_rtx[regno], reg_renumber[regno]);
4430 mark_home_live (regno);
4431 }
4432 else if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4433 fprintf (ira_dump_file, "\n");
ac0ab4f7
BS
4434 for (i = 0; i < n; i++)
4435 {
4436 ira_object_t obj = ALLOCNO_OBJECT (a, i);
4437 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), saved[i]);
4438 }
058e97ec
VM
4439 return reg_renumber[regno] >= 0;
4440}
4441
 4442/* Sort pseudos according to their usage frequencies (putting the most
 4443 frequently used ones first). */
4444static int
4445pseudo_reg_compare (const void *v1p, const void *v2p)
4446{
4447 int regno1 = *(const int *) v1p;
4448 int regno2 = *(const int *) v2p;
4449 int diff;
4450
4451 if ((diff = REG_FREQ (regno2) - REG_FREQ (regno1)) != 0)
4452 return diff;
4453 return regno1 - regno2;
4454}

/* Try to allocate hard registers to SPILLED_PSEUDO_REGS (there are
   NUM of them) or to spilled pseudos conflicting with pseudos in
   SPILLED_PSEUDO_REGS.  Return TRUE and update SPILLED if the
   allocation has been changed.  The function doesn't use
   BAD_SPILL_REGS and hard registers in PSEUDO_FORBIDDEN_REGS and
   PSEUDO_PREVIOUS_REGS for the corresponding pseudos.  The function
   is called by the reload pass at the end of each reload
   iteration.  */
bool
ira_reassign_pseudos (int *spilled_pseudo_regs, int num,
                      HARD_REG_SET bad_spill_regs,
                      HARD_REG_SET *pseudo_forbidden_regs,
                      HARD_REG_SET *pseudo_previous_regs,
                      bitmap spilled)
{
  int i, n, regno;
  bool changed_p;
  ira_allocno_t a;
  HARD_REG_SET forbidden_regs;
  bitmap temp = BITMAP_ALLOC (NULL);

  /* Add pseudos which conflict with pseudos already in
     SPILLED_PSEUDO_REGS to SPILLED_PSEUDO_REGS.  This is preferable
     to allocating in two steps as some of the conflicts might have
     a higher priority than the pseudos passed in SPILLED_PSEUDO_REGS.  */
  for (i = 0; i < num; i++)
    bitmap_set_bit (temp, spilled_pseudo_regs[i]);

  for (i = 0, n = num; i < n; i++)
    {
      int nr, j;
      int regno = spilled_pseudo_regs[i];
      bitmap_set_bit (temp, regno);

      a = ira_regno_allocno_map[regno];
      nr = ALLOCNO_NUM_OBJECTS (a);
      for (j = 0; j < nr; j++)
        {
          ira_object_t conflict_obj;
          ira_object_t obj = ALLOCNO_OBJECT (a, j);
          ira_object_conflict_iterator oci;

          FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
            {
              ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
              if (ALLOCNO_HARD_REGNO (conflict_a) < 0
                  && ! ALLOCNO_DONT_REASSIGN_P (conflict_a)
                  && bitmap_set_bit (temp, ALLOCNO_REGNO (conflict_a)))
                {
                  spilled_pseudo_regs[num++] = ALLOCNO_REGNO (conflict_a);
                  /* ?!? This seems wrong.  */
                  bitmap_set_bit (consideration_allocno_bitmap,
                                  ALLOCNO_NUM (conflict_a));
                }
            }
        }
    }

  if (num > 1)
    qsort (spilled_pseudo_regs, num, sizeof (int), pseudo_reg_compare);
  changed_p = false;
  /* Try to assign hard registers to pseudos from
     SPILLED_PSEUDO_REGS.  */
  for (i = 0; i < num; i++)
    {
      regno = spilled_pseudo_regs[i];
      COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
      IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
      IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
      gcc_assert (reg_renumber[regno] < 0);
      a = ira_regno_allocno_map[regno];
      ira_mark_allocation_change (regno);
      ira_assert (reg_renumber[regno] < 0);
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
        fprintf (ira_dump_file,
                 " Try Assign %d(a%d), cost=%d", regno, ALLOCNO_NUM (a),
                 ALLOCNO_MEMORY_COST (a)
                 - ALLOCNO_CLASS_COST (a));
      allocno_reload_assign (a, forbidden_regs);
      if (reg_renumber[regno] >= 0)
        {
          CLEAR_REGNO_REG_SET (spilled, regno);
          changed_p = true;
        }
    }
  BITMAP_FREE (temp);
  return changed_p;
}
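
/* A hypothetical caller sketch (for illustration only; reload's real
   call site differs in detail):

       int spilled_regs[MAX_SPILLED];   (hypothetical buffer)
       ... collect the regnos spilled so far into spilled_regs[0..n-1] ...
       if (ira_reassign_pseudos (spilled_regs, n, bad_spill_regs,
                                 pseudo_forbidden_regs,
                                 pseudo_previous_regs, spilled_bitmap))
         something_changed = true;

   Note that the function may append conflicting spilled pseudos to
   SPILLED_PSEUDO_REGS in place, so the array must be large enough to
   hold them as well.  */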

/* The function is called by reload and returns an already allocated
   stack slot (if any) for REGNO with given INHERENT_SIZE and
   TOTAL_SIZE.  If no slot which can be used for REGNO is found, the
   function returns NULL.  */
rtx
ira_reuse_stack_slot (int regno, poly_uint64 inherent_size,
                      poly_uint64 total_size)
{
  unsigned int i;
  int slot_num, best_slot_num;
  int cost, best_cost;
  ira_copy_t cp, next_cp;
  ira_allocno_t another_allocno, allocno = ira_regno_allocno_map[regno];
  rtx x;
  bitmap_iterator bi;
  struct ira_spilled_reg_stack_slot *slot = NULL;

  ira_assert (! ira_use_lra_p);

  ira_assert (known_eq (inherent_size, PSEUDO_REGNO_BYTES (regno))
              && known_le (inherent_size, total_size)
              && ALLOCNO_HARD_REGNO (allocno) < 0);
  if (! flag_ira_share_spill_slots)
    return NULL_RTX;
  slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
  if (slot_num != -1)
    {
      slot = &ira_spilled_reg_stack_slots[slot_num];
      x = slot->mem;
    }
  else
    {
      best_cost = best_slot_num = -1;
      x = NULL_RTX;
      /* It means that the pseudo was spilled in the reload pass, try
         to reuse a slot.  */
      for (slot_num = 0;
           slot_num < ira_spilled_reg_stack_slots_num;
           slot_num++)
        {
          slot = &ira_spilled_reg_stack_slots[slot_num];
          if (slot->mem == NULL_RTX)
            continue;
          if (maybe_lt (slot->width, total_size)
              || maybe_lt (GET_MODE_SIZE (GET_MODE (slot->mem)), inherent_size))
            continue;

          EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
                                    FIRST_PSEUDO_REGISTER, i, bi)
            {
              another_allocno = ira_regno_allocno_map[i];
              if (allocnos_conflict_by_live_ranges_p (allocno,
                                                      another_allocno))
                goto cont;
            }
          for (cost = 0, cp = ALLOCNO_COPIES (allocno);
               cp != NULL;
               cp = next_cp)
            {
              if (cp->first == allocno)
                {
                  next_cp = cp->next_first_allocno_copy;
                  another_allocno = cp->second;
                }
              else if (cp->second == allocno)
                {
                  next_cp = cp->next_second_allocno_copy;
                  another_allocno = cp->first;
                }
              else
                gcc_unreachable ();
              if (cp->insn == NULL_RTX)
                continue;
              if (bitmap_bit_p (&slot->spilled_regs,
                                ALLOCNO_REGNO (another_allocno)))
                cost += cp->freq;
            }
          if (cost > best_cost)
            {
              best_cost = cost;
              best_slot_num = slot_num;
            }
        cont:
          ;
        }
      if (best_cost >= 0)
        {
          slot_num = best_slot_num;
          slot = &ira_spilled_reg_stack_slots[slot_num];
          SET_REGNO_REG_SET (&slot->spilled_regs, regno);
          x = slot->mem;
          ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
        }
    }
  if (x != NULL_RTX)
    {
      ira_assert (known_ge (slot->width, total_size));
#ifdef ENABLE_IRA_CHECKING
      EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
                                FIRST_PSEUDO_REGISTER, i, bi)
        {
          ira_assert (! conflict_by_live_ranges_p (regno, i));
        }
#endif
      SET_REGNO_REG_SET (&slot->spilled_regs, regno);
      if (internal_flag_ira_verbose > 3 && ira_dump_file)
        {
          fprintf (ira_dump_file, " Assigning %d(freq=%d) slot %d of",
                   regno, REG_FREQ (regno), slot_num);
          EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
                                    FIRST_PSEUDO_REGISTER, i, bi)
            {
              if ((unsigned) regno != i)
                fprintf (ira_dump_file, " %d", i);
            }
          fprintf (ira_dump_file, "\n");
        }
    }
  return x;
}
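
/* The -N - 2 encoding used above: an allocno spilled to shared slot N
   has ALLOCNO_HARD_REGNO equal to -N - 2, so slot 0 is encoded as -2
   and slot 1 as -3, and slot_num = -ALLOCNO_HARD_REGNO - 2 recovers N.
   A spilled allocno that has no slot yet keeps the value -1, which
   decodes to slot_num == -1 and triggers the slot search above.  */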

/* This is called by reload every time a new stack slot X with
   TOTAL_SIZE is allocated for REGNO.  We store this info for
   subsequent ira_reuse_stack_slot calls.  */
void
ira_mark_new_stack_slot (rtx x, int regno, poly_uint64 total_size)
{
  struct ira_spilled_reg_stack_slot *slot;
  int slot_num;
  ira_allocno_t allocno;

  ira_assert (! ira_use_lra_p);

  ira_assert (known_le (PSEUDO_REGNO_BYTES (regno), total_size));
  allocno = ira_regno_allocno_map[regno];
  slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
  if (slot_num == -1)
    {
      slot_num = ira_spilled_reg_stack_slots_num++;
      ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
    }
  slot = &ira_spilled_reg_stack_slots[slot_num];
  INIT_REG_SET (&slot->spilled_regs);
  SET_REGNO_REG_SET (&slot->spilled_regs, regno);
  slot->mem = x;
  slot->width = total_size;
  if (internal_flag_ira_verbose > 3 && ira_dump_file)
    fprintf (ira_dump_file, " Assigning %d(freq=%d) a new slot %d\n",
             regno, REG_FREQ (regno), slot_num);
}
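
/* An illustrative (hypothetical) pairing of the two entry points
   above; reload's real logic differs in detail, and MODE and ALIGN
   stand for whatever the caller computes for the pseudo:

       rtx m = ira_reuse_stack_slot (regno, inherent_size, total_size);
       if (m == NULL_RTX)
         {
           m = assign_stack_local (mode, total_size, align);
           ira_mark_new_stack_slot (m, regno, total_size);
         }

   i.e. the caller first asks IRA whether an existing spill slot can
   be shared and records a brand-new slot only when reuse fails.  */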

\f
/* Return spill cost for pseudo-registers whose numbers are in array
   REGNOS (with a negative number as an end marker) for reload with
   given IN and OUT for INSN.  Also return the number of points where
   the pseudo-registers live and the register pressure is high
   (through EXCESS_PRESSURE_LIVE_LENGTH), the number of references of
   the pseudo-registers (through NREFS), the number of callee-clobbered
   hard registers occupied by the pseudo-registers (through
   CALL_USED_COUNT), and the first hard regno occupied by the
   pseudo-registers (through FIRST_HARD_REGNO).  */
static int
calculate_spill_cost (int *regnos, rtx in, rtx out, rtx_insn *insn,
                      int *excess_pressure_live_length,
                      int *nrefs, int *call_used_count, int *first_hard_regno)
{
  int i, cost, regno, hard_regno, j, count, saved_cost, nregs;
  bool in_p, out_p;
  int length;
  ira_allocno_t a;

  *nrefs = 0;
  for (length = count = cost = i = 0;; i++)
    {
      regno = regnos[i];
      if (regno < 0)
        break;
      *nrefs += REG_N_REFS (regno);
      hard_regno = reg_renumber[regno];
      ira_assert (hard_regno >= 0);
      a = ira_regno_allocno_map[regno];
      length += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) / ALLOCNO_NUM_OBJECTS (a);
      cost += ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a);
      nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (a));
      for (j = 0; j < nregs; j++)
        if (! TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + j))
          break;
      if (j == nregs)
        count++;
      in_p = in && REG_P (in) && (int) REGNO (in) == hard_regno;
      out_p = out && REG_P (out) && (int) REGNO (out) == hard_regno;
      if ((in_p || out_p)
          && find_regno_note (insn, REG_DEAD, hard_regno) != NULL_RTX)
        {
          saved_cost = 0;
          if (in_p)
            saved_cost += ira_memory_move_cost
                          [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][1];
          if (out_p)
            saved_cost
              += ira_memory_move_cost
                 [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][0];
          cost -= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn)) * saved_cost;
        }
    }
  *excess_pressure_live_length = length;
  *call_used_count = count;
  hard_regno = -1;
  if (regnos[0] >= 0)
    {
      hard_regno = reg_renumber[regnos[0]];
    }
  *first_hard_regno = hard_regno;
  return cost;
}
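
/* Informally: the cost returned above is the sum over the pseudos of
   ALLOCNO_MEMORY_COST - ALLOCNO_CLASS_COST, reduced by
   REG_FREQ_FROM_BB (BLOCK_FOR_INSN (INSN)) times the memory-move cost
   whenever a pseudo's hard register is the reload register IN or OUT
   and that hard register dies in INSN.  */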

/* Return TRUE if spilling the pseudo-registers whose numbers are in
   array REGNOS is better than spilling the pseudo-registers with
   numbers in OTHER_REGNOS for reload with given IN and OUT for INSN.
   The function is used by the reload pass to make better register
   spilling decisions.  */
bool
ira_better_spill_reload_regno_p (int *regnos, int *other_regnos,
                                 rtx in, rtx out, rtx_insn *insn)
{
  int cost, other_cost;
  int length, other_length;
  int nrefs, other_nrefs;
  int call_used_count, other_call_used_count;
  int hard_regno, other_hard_regno;

  cost = calculate_spill_cost (regnos, in, out, insn,
                               &length, &nrefs, &call_used_count, &hard_regno);
  other_cost = calculate_spill_cost (other_regnos, in, out, insn,
                                     &other_length, &other_nrefs,
                                     &other_call_used_count,
                                     &other_hard_regno);
  if (nrefs == 0 && other_nrefs != 0)
    return true;
  if (nrefs != 0 && other_nrefs == 0)
    return false;
  if (cost != other_cost)
    return cost < other_cost;
  if (length != other_length)
    return length > other_length;
#ifdef REG_ALLOC_ORDER
  if (hard_regno >= 0 && other_hard_regno >= 0)
    return (inv_reg_alloc_order[hard_regno]
            < inv_reg_alloc_order[other_hard_regno]);
#else
  if (call_used_count != other_call_used_count)
    return call_used_count > other_call_used_count;
#endif
  return false;
}
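
/* The choice above is lexicographic: a group with no remaining
   references is always the better spill candidate, then the one with
   the lower spill cost, then the one living through more
   high-pressure points; the final tie-break prefers either the group
   whose first hard register comes earlier in REG_ALLOC_ORDER or,
   without such an order, the group occupying more call-clobbered
   hard registers.  */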

\f

/* Allocate and initialize data necessary for assign_hard_reg.  */
void
ira_initiate_assign (void)
{
  sorted_allocnos
    = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
                                      * ira_allocnos_num);
  consideration_allocno_bitmap = ira_allocate_bitmap ();
  initiate_cost_update ();
  allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
  sorted_copies = (ira_copy_t *) ira_allocate (ira_copies_num
                                               * sizeof (ira_copy_t));
}

/* Deallocate data used by assign_hard_reg.  */
void
ira_finish_assign (void)
{
  ira_free (sorted_allocnos);
  ira_free_bitmap (consideration_allocno_bitmap);
  finish_cost_update ();
  ira_free (allocno_priorities);
  ira_free (sorted_copies);
}

\f

/* Entry function doing color-based register allocation.  */
static void
color (void)
{
  allocno_stack_vec.create (ira_allocnos_num);
  memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
  ira_initiate_assign ();
  do_coloring ();
  ira_finish_assign ();
  allocno_stack_vec.release ();
  move_spill_restore ();
}

\f

/* This page contains a simple register allocator that does not use
   allocno conflicts.  It is used for fast allocation at -O0.  */

/* Do register allocation without using allocno conflicts; only
   allocno live ranges are used.  The algorithm is close to Chow's
   priority coloring.  */
static void
fast_allocation (void)
{
  int i, j, k, num, class_size, hard_regno, best_hard_regno, cost, min_cost;
  int *costs;
#ifdef STACK_REGS
  bool no_stack_reg_p;
#endif
  enum reg_class aclass;
  machine_mode mode;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  live_range_t r;
  HARD_REG_SET conflict_hard_regs, *used_hard_regs;

  sorted_allocnos = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
                                                    * ira_allocnos_num);
  num = 0;
  FOR_EACH_ALLOCNO (a, ai)
    sorted_allocnos[num++] = a;
  allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
  setup_allocno_priorities (sorted_allocnos, num);
  used_hard_regs = (HARD_REG_SET *) ira_allocate (sizeof (HARD_REG_SET)
                                                  * ira_max_point);
  for (i = 0; i < ira_max_point; i++)
    CLEAR_HARD_REG_SET (used_hard_regs[i]);
  qsort (sorted_allocnos, num, sizeof (ira_allocno_t),
         allocno_priority_compare_func);
  for (i = 0; i < num; i++)
    {
      int nr, l;

      a = sorted_allocnos[i];
      nr = ALLOCNO_NUM_OBJECTS (a);
      CLEAR_HARD_REG_SET (conflict_hard_regs);
      for (l = 0; l < nr; l++)
        {
          ira_object_t obj = ALLOCNO_OBJECT (a, l);
          IOR_HARD_REG_SET (conflict_hard_regs,
                            OBJECT_CONFLICT_HARD_REGS (obj));
          for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
            for (j = r->start; j <= r->finish; j++)
              IOR_HARD_REG_SET (conflict_hard_regs, used_hard_regs[j]);
        }
      aclass = ALLOCNO_CLASS (a);
      ALLOCNO_ASSIGNED_P (a) = true;
      ALLOCNO_HARD_REGNO (a) = -1;
      if (hard_reg_set_subset_p (reg_class_contents[aclass],
                                 conflict_hard_regs))
        continue;
      mode = ALLOCNO_MODE (a);
#ifdef STACK_REGS
      no_stack_reg_p = ALLOCNO_NO_STACK_REG_P (a);
#endif
      class_size = ira_class_hard_regs_num[aclass];
      costs = ALLOCNO_HARD_REG_COSTS (a);
      min_cost = INT_MAX;
      best_hard_regno = -1;
      for (j = 0; j < class_size; j++)
        {
          hard_regno = ira_class_hard_regs[aclass][j];
#ifdef STACK_REGS
          if (no_stack_reg_p && FIRST_STACK_REG <= hard_regno
              && hard_regno <= LAST_STACK_REG)
            continue;
#endif
          if (ira_hard_reg_set_intersection_p (hard_regno, mode, conflict_hard_regs)
              || (TEST_HARD_REG_BIT
                  (ira_prohibited_class_mode_regs[aclass][mode], hard_regno)))
            continue;
          if (costs == NULL)
            {
              best_hard_regno = hard_regno;
              break;
            }
          cost = costs[j];
          if (min_cost > cost)
            {
              min_cost = cost;
              best_hard_regno = hard_regno;
            }
        }
      if (best_hard_regno < 0)
        continue;
      ALLOCNO_HARD_REGNO (a) = hard_regno = best_hard_regno;
      for (l = 0; l < nr; l++)
        {
          ira_object_t obj = ALLOCNO_OBJECT (a, l);
          for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
            for (k = r->start; k <= r->finish; k++)
              IOR_HARD_REG_SET (used_hard_regs[k],
                                ira_reg_mode_hard_regset[hard_regno][mode]);
        }
    }
  ira_free (sorted_allocnos);
  ira_free (used_hard_regs);
  ira_free (allocno_priorities);
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    ira_print_disposition (ira_dump_file);
}
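
/* In short: allocnos are visited in priority order, and
   used_hard_regs[point] records which hard registers are already
   taken at every program point.  Each allocno receives the cheapest
   hard register of its class that is free over all points of its
   live ranges; if none is free it is simply left in memory
   (ALLOCNO_HARD_REGNO stays -1), as this scheme never spills or
   backtracks.  */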

\f

/* Entry function doing coloring.  */
void
ira_color (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  /* Setup updated costs.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
      ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
    }
  if (ira_conflicts_p)
    color ();
  else
    fast_allocation ();
}
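
/* As the page comment above explains, the conflict-free
   fast_allocation path is meant for the case where no allocno
   conflicts have been built (e.g. at -O0), which is what
   ira_conflicts_p being false indicates; otherwise the full
   graph-coloring allocator in color () is used.  */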