]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/ira-color.c
ira.h (target_ira): Delete x_ira_available_class_regs.
[thirdparty/gcc.git] / gcc / ira-color.c
1 /* IRA allocation based on graph coloring.
2 Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012
3 Free Software Foundation, Inc.
4 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "target.h"
29 #include "regs.h"
30 #include "flags.h"
31 #include "sbitmap.h"
32 #include "bitmap.h"
33 #include "hard-reg-set.h"
34 #include "basic-block.h"
35 #include "expr.h"
36 #include "diagnostic-core.h"
37 #include "reload.h"
38 #include "params.h"
39 #include "df.h"
40 #include "ira-int.h"
41
42 typedef struct allocno_hard_regs *allocno_hard_regs_t;
43
/* The structure contains information about hard registers which can
   be assigned to allocnos.  Usually it is the allocno profitable hard
   registers, but in some cases this set can be a bit different.  The
   major reason for the difference is a requirement to use hard
   register sets that form a tree or a forest (set of trees), i.e. the
   hard register set of a node should contain the hard register sets
   of its subnodes.  */
struct allocno_hard_regs
{
  /* Hard registers which can be assigned to an allocno.  */
  HARD_REG_SET set;
  /* Overall (spilling) cost of all allocnos with the given register
     set.  */
  HOST_WIDEST_INT cost;
};
58
59 typedef struct allocno_hard_regs_node *allocno_hard_regs_node_t;
60
/* A node representing allocno hard registers.  Such nodes form a
   forest (set of trees).  Each subnode of a given node in the forest
   refers to a hard register set (usually an allocno profitable hard
   register set) which is a subset of the one referred to from the
   given node.  */
struct allocno_hard_regs_node
{
  /* Number of the node in a preorder traversal of the forest.  */
  int preorder_num;
  /* Tick used for different calculations, like finding the conflict
     size of an allocno; compared against node_check_tick.  */
  int check;
  /* Used for calculation of conflict size of an allocno.  The
     conflict size of the allocno is the maximal number of the given
     allocno hard registers needed for allocation of the conflicting
     allocnos.  The given allocno is trivially colored if this number
     plus the number of hard registers needed for the given allocno is
     not greater than the size of the given allocno hard register
     set.  */
  int conflict_size;
  /* The number of hard registers given by member hard_regs.  */
  int hard_regs_num;
  /* The following member is used to form the final forest: only
     nodes some allocno actually refers to are kept.  */
  bool used_p;
  /* Pointer to the corresponding profitable hard registers.  */
  allocno_hard_regs_t hard_regs;
  /* Parent, first subnode, previous and next node with the same
     parent in the forest.  */
  allocno_hard_regs_node_t parent, first, prev, next;
};
90
/* To decrease footprint of ira_allocno structure we store all data
   needed only for coloring in the following structure.  */
struct allocno_color_data
{
  /* TRUE value means that the allocno was not removed yet from the
     conflicting graph during colouring.  */
  unsigned int in_graph_p : 1;
  /* TRUE if it is put on the stack to make other allocnos
     colorable.  */
  unsigned int may_be_spilled_p : 1;
  /* TRUE if the allocno is trivially colorable.  */
  unsigned int colorable_p : 1;
  /* Number of hard registers of the allocno class really
     available for the allocno allocation.  It is the number of the
     profitable hard regs.  */
  int available_regs_num;
  /* Allocnos in a bucket (used in coloring) chained by the following
     two members.  */
  ira_allocno_t next_bucket_allocno;
  ira_allocno_t prev_bucket_allocno;
  /* Used for temporary purposes.  */
  int temp;
  /* Used to exclude repeated processing; compared against
     curr_allocno_process.  */
  int last_process;
  /* Profitable hard regs available for this pseudo allocation.  It
     means that the set excludes unavailable hard regs and hard regs
     conflicting with the given pseudo.  They should be of the allocno
     class.  */
  HARD_REG_SET profitable_hard_regs;
  /* The allocno hard registers node.  */
  allocno_hard_regs_node_t hard_regs_node;
  /* Start index (into allocno_hard_regs_subnodes) of the array of
     structures allocno_hard_regs_subnode representing the given
     allocno hard registers node (the 1st element in the array) and
     all its subnodes in the tree (forest) of allocno hard register
     nodes (see comments above).  */
  int hard_regs_subnodes_start;
  /* The length of the previous array.  */
  int hard_regs_subnodes_num;
};
130
/* See above.  */
typedef struct allocno_color_data *allocno_color_data_t;

/* Container for storing allocno data concerning coloring.  */
static allocno_color_data_t allocno_color_data;

/* Macro to access the data concerning coloring.  */
#define ALLOCNO_COLOR_DATA(a) ((allocno_color_data_t) ALLOCNO_ADD_DATA (a))

/* Used for finding allocno colorability to exclude repeated allocno
   processing and for updating preferencing to exclude repeated
   allocno processing during assignment.  */
static int curr_allocno_process;

/* This file contains code for regional graph coloring, spill/restore
   code placement optimization, and code helping the reload pass to do
   a better job.  */

/* Bitmap of allocnos which should be colored.  */
static bitmap coloring_allocno_bitmap;

/* Bitmap of allocnos which should be taken into account during
   coloring.  In the general case it contains allocnos from
   coloring_allocno_bitmap plus other already colored conflicting
   allocnos.  */
static bitmap consideration_allocno_bitmap;

/* All allocnos sorted according to their priorities.  */
static ira_allocno_t *sorted_allocnos;

/* Vec representing the stack of allocnos used during coloring.  */
static VEC(ira_allocno_t,heap) *allocno_stack_vec;

/* Helper for qsort comparison callbacks - return a positive integer if
   X > Y, or a negative value otherwise.  Use a conditional expression
   instead of a difference computation to insulate from possible overflow
   issues, e.g. X - Y < 0 for some X > 0 and Y < 0.  */
#define SORTGT(x,y) (((x) > (y)) ? 1 : -1)
169
170 \f
171
/* Definition of vector of allocno hard registers.  */
DEF_VEC_P(allocno_hard_regs_t);
DEF_VEC_ALLOC_P(allocno_hard_regs_t, heap);

/* Vector of unique allocno hard registers, in creation order; a
   suffix of it is cost-sorted in form_allocno_hard_regs_nodes_forest.  */
static VEC(allocno_hard_regs_t, heap) *allocno_hard_regs_vec;
178
179 /* Returns hash value for allocno hard registers V. */
180 static hashval_t
181 allocno_hard_regs_hash (const void *v)
182 {
183 const struct allocno_hard_regs *hv = (const struct allocno_hard_regs *) v;
184
185 return iterative_hash (&hv->set, sizeof (HARD_REG_SET), 0);
186 }
187
188 /* Compares allocno hard registers V1 and V2. */
189 static int
190 allocno_hard_regs_eq (const void *v1, const void *v2)
191 {
192 const struct allocno_hard_regs *hv1 = (const struct allocno_hard_regs *) v1;
193 const struct allocno_hard_regs *hv2 = (const struct allocno_hard_regs *) v2;
194
195 return hard_reg_set_equal_p (hv1->set, hv2->set);
196 }
197
/* Hash table of unique allocno hard registers; used by
   find_hard_regs/insert_hard_regs to deduplicate records.  */
static htab_t allocno_hard_regs_htab;
200
201 /* Return allocno hard registers in the hash table equal to HV. */
202 static allocno_hard_regs_t
203 find_hard_regs (allocno_hard_regs_t hv)
204 {
205 return (allocno_hard_regs_t) htab_find (allocno_hard_regs_htab, hv);
206 }
207
208 /* Insert allocno hard registers HV in the hash table (if it is not
209 there yet) and return the value which in the table. */
210 static allocno_hard_regs_t
211 insert_hard_regs (allocno_hard_regs_t hv)
212 {
213 PTR *slot = htab_find_slot (allocno_hard_regs_htab, hv, INSERT);
214
215 if (*slot == NULL)
216 *slot = hv;
217 return (allocno_hard_regs_t) *slot;
218 }
219
220 /* Initialize data concerning allocno hard registers. */
221 static void
222 init_allocno_hard_regs (void)
223 {
224 allocno_hard_regs_vec = VEC_alloc (allocno_hard_regs_t, heap, 200);
225 allocno_hard_regs_htab
226 = htab_create (200, allocno_hard_regs_hash, allocno_hard_regs_eq, NULL);
227 }
228
229 /* Add (or update info about) allocno hard registers with SET and
230 COST. */
231 static allocno_hard_regs_t
232 add_allocno_hard_regs (HARD_REG_SET set, HOST_WIDEST_INT cost)
233 {
234 struct allocno_hard_regs temp;
235 allocno_hard_regs_t hv;
236
237 gcc_assert (! hard_reg_set_empty_p (set));
238 COPY_HARD_REG_SET (temp.set, set);
239 if ((hv = find_hard_regs (&temp)) != NULL)
240 hv->cost += cost;
241 else
242 {
243 hv = ((struct allocno_hard_regs *)
244 ira_allocate (sizeof (struct allocno_hard_regs)));
245 COPY_HARD_REG_SET (hv->set, set);
246 hv->cost = cost;
247 VEC_safe_push (allocno_hard_regs_t, heap, allocno_hard_regs_vec, hv);
248 insert_hard_regs (hv);
249 }
250 return hv;
251 }
252
253 /* Finalize data concerning allocno hard registers. */
254 static void
255 finish_allocno_hard_regs (void)
256 {
257 int i;
258 allocno_hard_regs_t hv;
259
260 for (i = 0;
261 VEC_iterate (allocno_hard_regs_t, allocno_hard_regs_vec, i, hv);
262 i++)
263 ira_free (hv);
264 htab_delete (allocno_hard_regs_htab);
265 VEC_free (allocno_hard_regs_t, heap, allocno_hard_regs_vec);
266 }
267
268 /* Sort hard regs according to their frequency of usage. */
269 static int
270 allocno_hard_regs_compare (const void *v1p, const void *v2p)
271 {
272 allocno_hard_regs_t hv1 = *(const allocno_hard_regs_t *) v1p;
273 allocno_hard_regs_t hv2 = *(const allocno_hard_regs_t *) v2p;
274
275 if (hv2->cost > hv1->cost)
276 return 1;
277 else if (hv2->cost < hv1->cost)
278 return -1;
279 else
280 return 0;
281 }
282
283 \f
284
/* Used for finding a common ancestor of two allocno hard registers
   nodes in the forest.  We use the current value of
   'node_check_tick' to mark all nodes from one node to the top and
   then walk up from another node until we find a marked node.

   It is also used to figure out allocno colorability as a mark that
   we already reset the value of member 'conflict_size' for the forest
   node corresponding to the processed allocno.  */
static int node_check_tick;

/* Roots of the forest containing hard register sets which can be
   assigned to allocnos.  */
static allocno_hard_regs_node_t hard_regs_roots;

/* Definition of vector of allocno hard register nodes.  */
DEF_VEC_P(allocno_hard_regs_node_t);
DEF_VEC_ALLOC_P(allocno_hard_regs_node_t, heap);

/* Scratch vector used while creating the forest.  */
static VEC(allocno_hard_regs_node_t, heap) *hard_regs_node_vec;
305
306 /* Create and return allocno hard registers node containing allocno
307 hard registers HV. */
308 static allocno_hard_regs_node_t
309 create_new_allocno_hard_regs_node (allocno_hard_regs_t hv)
310 {
311 allocno_hard_regs_node_t new_node;
312
313 new_node = ((struct allocno_hard_regs_node *)
314 ira_allocate (sizeof (struct allocno_hard_regs_node)));
315 new_node->check = 0;
316 new_node->hard_regs = hv;
317 new_node->hard_regs_num = hard_reg_set_size (hv->set);
318 new_node->first = NULL;
319 new_node->used_p = false;
320 return new_node;
321 }
322
323 /* Add allocno hard registers node NEW_NODE to the forest on its level
324 given by ROOTS. */
325 static void
326 add_new_allocno_hard_regs_node_to_forest (allocno_hard_regs_node_t *roots,
327 allocno_hard_regs_node_t new_node)
328 {
329 new_node->next = *roots;
330 if (new_node->next != NULL)
331 new_node->next->prev = new_node;
332 new_node->prev = NULL;
333 *roots = new_node;
334 }
335
/* Add allocno hard registers HV (or its best approximation if it is
   not possible) to the forest on its level given by ROOTS.  */
static void
add_allocno_hard_regs_to_forest (allocno_hard_regs_node_t *roots,
				 allocno_hard_regs_t hv)
{
  unsigned int i, start;
  allocno_hard_regs_node_t node, prev, new_node;
  HARD_REG_SET temp_set;
  allocno_hard_regs_t hv2;

  /* Remember where our additions to the shared scratch vector start;
     the function is called recursively.  */
  start = VEC_length (allocno_hard_regs_node_t, hard_regs_node_vec);
  for (node = *roots; node != NULL; node = node->next)
    {
      /* HV is already represented on this level -- nothing to do.  */
      if (hard_reg_set_equal_p (hv->set, node->hard_regs->set))
	return;
      /* HV lies strictly inside NODE's set -- descend into NODE's
	 subtree.  */
      if (hard_reg_set_subset_p (hv->set, node->hard_regs->set))
	{
	  add_allocno_hard_regs_to_forest (&node->first, hv);
	  return;
	}
      /* NODE's set lies inside HV -- collect NODE; collected nodes
	 become children of a new node for HV below.  */
      if (hard_reg_set_subset_p (node->hard_regs->set, hv->set))
	VEC_safe_push (allocno_hard_regs_node_t, heap,
		       hard_regs_node_vec, node);
      else if (hard_reg_set_intersect_p (hv->set, node->hard_regs->set))
	{
	  /* Partial overlap: approximate HV by its intersection with
	     NODE's set and add that to NODE's subtree instead.  */
	  COPY_HARD_REG_SET (temp_set, hv->set);
	  AND_HARD_REG_SET (temp_set, node->hard_regs->set);
	  hv2 = add_allocno_hard_regs (temp_set, hv->cost);
	  add_allocno_hard_regs_to_forest (&node->first, hv2);
	}
    }
  if (VEC_length (allocno_hard_regs_node_t, hard_regs_node_vec)
      > start + 1)
    {
      /* Create a new node which contains nodes in hard_regs_node_vec.  */
      CLEAR_HARD_REG_SET (temp_set);
      for (i = start;
	   i < VEC_length (allocno_hard_regs_node_t, hard_regs_node_vec);
	   i++)
	{
	  node = VEC_index (allocno_hard_regs_node_t, hard_regs_node_vec, i);
	  IOR_HARD_REG_SET (temp_set, node->hard_regs->set);
	}
      hv = add_allocno_hard_regs (temp_set, hv->cost);
      new_node = create_new_allocno_hard_regs_node (hv);
      prev = NULL;
      /* Unlink the collected nodes from this level and chain them as
	 children of NEW_NODE, preserving their relative order.  */
      for (i = start;
	   i < VEC_length (allocno_hard_regs_node_t, hard_regs_node_vec);
	   i++)
	{
	  node = VEC_index (allocno_hard_regs_node_t, hard_regs_node_vec, i);
	  if (node->prev == NULL)
	    *roots = node->next;
	  else
	    node->prev->next = node->next;
	  if (node->next != NULL)
	    node->next->prev = node->prev;
	  if (prev == NULL)
	    new_node->first = node;
	  else
	    prev->next = node;
	  node->prev = prev;
	  node->next = NULL;
	  prev = node;
	}
      add_new_allocno_hard_regs_node_to_forest (roots, new_node);
    }
  /* Drop our temporary entries from the shared scratch vector.  */
  VEC_truncate (allocno_hard_regs_node_t, hard_regs_node_vec, start);
}
406
407 /* Add allocno hard registers nodes starting with the forest level
408 given by FIRST which contains biggest set inside SET. */
409 static void
410 collect_allocno_hard_regs_cover (allocno_hard_regs_node_t first,
411 HARD_REG_SET set)
412 {
413 allocno_hard_regs_node_t node;
414
415 ira_assert (first != NULL);
416 for (node = first; node != NULL; node = node->next)
417 if (hard_reg_set_subset_p (node->hard_regs->set, set))
418 VEC_safe_push (allocno_hard_regs_node_t, heap, hard_regs_node_vec,
419 node);
420 else if (hard_reg_set_intersect_p (set, node->hard_regs->set))
421 collect_allocno_hard_regs_cover (node->first, set);
422 }
423
424 /* Set up field parent as PARENT in all allocno hard registers nodes
425 in forest given by FIRST. */
426 static void
427 setup_allocno_hard_regs_nodes_parent (allocno_hard_regs_node_t first,
428 allocno_hard_regs_node_t parent)
429 {
430 allocno_hard_regs_node_t node;
431
432 for (node = first; node != NULL; node = node->next)
433 {
434 node->parent = parent;
435 setup_allocno_hard_regs_nodes_parent (node->first, node);
436 }
437 }
438
/* Return the allocno hard registers node which is the first common
   ancestor node of FIRST and SECOND in the forest.  */
static allocno_hard_regs_node_t
first_common_ancestor_node (allocno_hard_regs_node_t first,
			    allocno_hard_regs_node_t second)
{
  allocno_hard_regs_node_t node;

  /* Mark the path from FIRST up to its root with a fresh tick ...  */
  node_check_tick++;
  for (node = first; node != NULL; node = node->parent)
    node->check = node_check_tick;
  /* ... then the first marked node on SECOND's path to its root is
     the common ancestor.  */
  for (node = second; node != NULL; node = node->parent)
    if (node->check == node_check_tick)
      return node;
  /* NOTE(review): no marked node found, so retry with the arguments
     swapped.  If the two nodes were in different trees of the forest
     this would recurse forever; callers apparently guarantee a
     common ancestor exists -- confirm.  */
  return first_common_ancestor_node (second, first);
}
455
456 /* Print hard reg set SET to F. */
457 static void
458 print_hard_reg_set (FILE *f, HARD_REG_SET set, bool new_line_p)
459 {
460 int i, start;
461
462 for (start = -1, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
463 {
464 if (TEST_HARD_REG_BIT (set, i))
465 {
466 if (i == 0 || ! TEST_HARD_REG_BIT (set, i - 1))
467 start = i;
468 }
469 if (start >= 0
470 && (i == FIRST_PSEUDO_REGISTER - 1 || ! TEST_HARD_REG_BIT (set, i)))
471 {
472 if (start == i - 1)
473 fprintf (f, " %d", start);
474 else if (start == i - 2)
475 fprintf (f, " %d %d", start, start + 1);
476 else
477 fprintf (f, " %d-%d", start, i - 1);
478 start = -1;
479 }
480 }
481 if (new_line_p)
482 fprintf (f, "\n");
483 }
484
/* Print allocno hard register subforest given by ROOTS and its LEVEL
   to F.  Each node is shown as "<preorder_num>:(<regs>)@<cost>",
   indented two extra columns per level.  */
static void
print_hard_regs_subforest (FILE *f, allocno_hard_regs_node_t roots,
			   int level)
{
  int i;
  allocno_hard_regs_node_t node;

  for (node = roots; node != NULL; node = node->next)
    {
      fprintf (f, " ");
      for (i = 0; i < level * 2; i++)
	fprintf (f, " ");
      fprintf (f, "%d:(", node->preorder_num);
      print_hard_reg_set (f, node->hard_regs->set, false);
      fprintf (f, ")@" HOST_WIDEST_INT_PRINT_DEC "\n", node->hard_regs->cost);
      print_hard_regs_subforest (f, node->first, level + 1);
    }
}
505
/* Print the whole allocno hard register forest to F.  */
static void
print_hard_regs_forest (FILE *f)
{
  fprintf (f, " Hard reg set forest:\n");
  print_hard_regs_subforest (f, hard_regs_roots, 1);
}
513
/* Print the allocno hard register forest to stderr.  External so it
   can be called from a debugger.  */
void
ira_debug_hard_regs_forest (void)
{
  print_hard_regs_forest (stderr);
}
520
/* Remove unused allocno hard registers nodes from the forest given by
   its *ROOTS.  A node whose used_p is not set is deleted and its
   children (if any) are spliced into its place on the sibling
   list.  */
static void
remove_unused_allocno_hard_regs_nodes (allocno_hard_regs_node_t *roots)
{
  allocno_hard_regs_node_t node, prev, next, last;

  for (prev = NULL, node = *roots; node != NULL; node = next)
    {
      next = node->next;
      if (node->used_p)
	{
	  /* Keep the node; clean up its subtree recursively.  */
	  remove_unused_allocno_hard_regs_nodes (&node->first);
	  prev = node;
	}
      else
	{
	  /* Find the last child of NODE (if any).  */
	  for (last = node->first;
	       last != NULL && last->next != NULL;
	       last = last->next)
	    ;
	  if (last != NULL)
	    {
	      /* Splice NODE's children into NODE's position; they are
		 revisited by the loop because NEXT is redirected to
		 the first child.  NOTE(review): the first child's
		 prev field is left pointing at the freed NODE --
		 later processing appears not to rely on prev links at
		 this point, but confirm.  */
	      if (prev == NULL)
		*roots = node->first;
	      else
		prev->next = node->first;
	      if (next != NULL)
		next->prev = last;
	      last->next = next;
	      next = node->first;
	    }
	  else
	    {
	      /* Childless node: just unlink it.  */
	      if (prev == NULL)
		*roots = next;
	      else
		prev->next = next;
	      if (next != NULL)
		next->prev = prev;
	    }
	  ira_free (node);
	}
    }
}
566
567 /* Set up fields preorder_num starting with START_NUM in all allocno
568 hard registers nodes in forest given by FIRST. Return biggest set
569 PREORDER_NUM increased by 1. */
570 static int
571 enumerate_allocno_hard_regs_nodes (allocno_hard_regs_node_t first,
572 allocno_hard_regs_node_t parent,
573 int start_num)
574 {
575 allocno_hard_regs_node_t node;
576
577 for (node = first; node != NULL; node = node->next)
578 {
579 node->preorder_num = start_num++;
580 node->parent = parent;
581 start_num = enumerate_allocno_hard_regs_nodes (node->first, node,
582 start_num);
583 }
584 return start_num;
585 }
586
/* Number of allocno hard registers nodes in the forest.  */
static int allocno_hard_regs_nodes_num;

/* Table: preorder number of an allocno hard registers node in the
   forest -> the allocno hard registers node.  */
static allocno_hard_regs_node_t *allocno_hard_regs_nodes;

/* See below.  */
typedef struct allocno_hard_regs_subnode *allocno_hard_regs_subnode_t;

/* The structure is used to describe all subnodes (not only immediate
   ones) in the above-mentioned tree for a given allocno hard register
   node.  The usage of such data accelerates the calculation of
   colorability of a given allocno.  */
struct allocno_hard_regs_subnode
{
  /* The conflict size of conflicting allocnos whose hard register
     sets are equal sets (plus supersets if the given node is the
     given allocno hard registers node) of the one in the given
     node.  */
  int left_conflict_size;
  /* The summary conflict size of conflicting allocnos whose hard
     register sets are strict subsets of the one in the given node.
     Overall conflict size is
     left_conflict_subnodes_size
       + MIN (max_node_impact - left_conflict_subnodes_size,
              left_conflict_size)
  */
  short left_conflict_subnodes_size;
  /* Number of the node's hard registers usable by the allocno (its
     hard_regs_num capped by the allocno's profitable regs).  */
  short max_node_impact;
};

/* Container for hard regs subnodes of all allocnos.  */
static allocno_hard_regs_subnode_t allocno_hard_regs_subnodes;

/* Table (preorder number of an allocno hard registers node in the
   forest, preorder number of an allocno hard registers subnode) ->
   index of the subnode relative to the node.  -1 if it is not a
   subnode.  */
static int *allocno_hard_regs_subnode_index;
626
627 /* Setup arrays ALLOCNO_HARD_REGS_NODES and
628 ALLOCNO_HARD_REGS_SUBNODE_INDEX. */
629 static void
630 setup_allocno_hard_regs_subnode_index (allocno_hard_regs_node_t first)
631 {
632 allocno_hard_regs_node_t node, parent;
633 int index;
634
635 for (node = first; node != NULL; node = node->next)
636 {
637 allocno_hard_regs_nodes[node->preorder_num] = node;
638 for (parent = node; parent != NULL; parent = parent->parent)
639 {
640 index = parent->preorder_num * allocno_hard_regs_nodes_num;
641 allocno_hard_regs_subnode_index[index + node->preorder_num]
642 = node->preorder_num - parent->preorder_num;
643 }
644 setup_allocno_hard_regs_subnode_index (node->first);
645 }
646 }
647
648 /* Count all allocno hard registers nodes in tree ROOT. */
649 static int
650 get_allocno_hard_regs_subnodes_num (allocno_hard_regs_node_t root)
651 {
652 int len = 1;
653
654 for (root = root->first; root != NULL; root = root->next)
655 len += get_allocno_hard_regs_subnodes_num (root);
656 return len;
657 }
658
/* Build the forest of allocno hard registers nodes and assign each
   allocno a node from the forest.  */
static void
form_allocno_hard_regs_nodes_forest (void)
{
  unsigned int i, j, size, len;
  int start;
  ira_allocno_t a;
  allocno_hard_regs_t hv;
  bitmap_iterator bi;
  HARD_REG_SET temp;
  allocno_hard_regs_node_t node, allocno_hard_regs_node;
  allocno_color_data_t allocno_data;

  node_check_tick = 0;
  init_allocno_hard_regs ();
  hard_regs_roots = NULL;
  hard_regs_node_vec = VEC_alloc (allocno_hard_regs_node_t, heap, 100);
  /* Seed the forest with a singleton node for every allocatable hard
     register.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
      {
	CLEAR_HARD_REG_SET (temp);
	SET_HARD_REG_BIT (temp, i);
	hv = add_allocno_hard_regs (temp, 0);
	node = create_new_allocno_hard_regs_node (hv);
	add_new_allocno_hard_regs_node_to_forest (&hard_regs_roots, node);
      }
  start = VEC_length (allocno_hard_regs_t, allocno_hard_regs_vec);
  /* Register the profitable hard reg set of every allocno to be
     colored, weighted by its spill cost (memory cost minus class
     cost).  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      allocno_data = ALLOCNO_COLOR_DATA (a);

      if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
	continue;
      hv = (add_allocno_hard_regs
	    (allocno_data->profitable_hard_regs,
	     ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a)));
    }
  /* Make sure the set of all allocatable hard registers is present.  */
  SET_HARD_REG_SET (temp);
  AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
  add_allocno_hard_regs (temp, 0);
  /* Process more expensive sets first so they get the least
     approximated positions in the forest.  */
  qsort (VEC_address (allocno_hard_regs_t, allocno_hard_regs_vec) + start,
	 VEC_length (allocno_hard_regs_t, allocno_hard_regs_vec) - start,
	 sizeof (allocno_hard_regs_t), allocno_hard_regs_compare);
  for (i = start;
       VEC_iterate (allocno_hard_regs_t, allocno_hard_regs_vec, i, hv);
       i++)
    {
      add_allocno_hard_regs_to_forest (&hard_regs_roots, hv);
      ira_assert (VEC_length (allocno_hard_regs_node_t,
			      hard_regs_node_vec) == 0);
    }
  /* We need to set up parent fields for right work of
     first_common_ancestor_node. */
  setup_allocno_hard_regs_nodes_parent (hard_regs_roots, NULL);
  /* Assign each allocno the common ancestor of all forest nodes
     covering its profitable hard regs, and mark that node used.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      allocno_data = ALLOCNO_COLOR_DATA (a);
      if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
	continue;
      VEC_truncate (allocno_hard_regs_node_t, hard_regs_node_vec, 0);
      collect_allocno_hard_regs_cover (hard_regs_roots,
				       allocno_data->profitable_hard_regs);
      allocno_hard_regs_node = NULL;
      for (j = 0;
	   VEC_iterate (allocno_hard_regs_node_t, hard_regs_node_vec,
			j, node);
	   j++)
	allocno_hard_regs_node
	  = (j == 0
	     ? node
	     : first_common_ancestor_node (node, allocno_hard_regs_node));
      /* That is a temporary storage.  */
      allocno_hard_regs_node->used_p = true;
      allocno_data->hard_regs_node = allocno_hard_regs_node;
    }
  ira_assert (hard_regs_roots->next == NULL);
  hard_regs_roots->used_p = true;
  /* Drop nodes no allocno refers to, splicing their children into
     their place, then number the survivors in preorder.  */
  remove_unused_allocno_hard_regs_nodes (&hard_regs_roots);
  allocno_hard_regs_nodes_num
    = enumerate_allocno_hard_regs_nodes (hard_regs_roots, NULL, 0);
  allocno_hard_regs_nodes
    = ((allocno_hard_regs_node_t *)
       ira_allocate (allocno_hard_regs_nodes_num
		     * sizeof (allocno_hard_regs_node_t)));
  /* Build the (node, subnode) -> relative-index table.  */
  size = allocno_hard_regs_nodes_num * allocno_hard_regs_nodes_num;
  allocno_hard_regs_subnode_index
    = (int *) ira_allocate (size * sizeof (int));
  for (i = 0; i < size; i++)
    allocno_hard_regs_subnode_index[i] = -1;
  setup_allocno_hard_regs_subnode_index (hard_regs_roots);
  /* Reserve a slice of the subnodes array for every allocno.  */
  start = 0;
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      allocno_data = ALLOCNO_COLOR_DATA (a);
      if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
	continue;
      len = get_allocno_hard_regs_subnodes_num (allocno_data->hard_regs_node);
      allocno_data->hard_regs_subnodes_start = start;
      allocno_data->hard_regs_subnodes_num = len;
      start += len;
    }
  allocno_hard_regs_subnodes
    = ((allocno_hard_regs_subnode_t)
       ira_allocate (sizeof (struct allocno_hard_regs_subnode) * start));
  VEC_free (allocno_hard_regs_node_t, heap, hard_regs_node_vec);
}
769
770 /* Free tree of allocno hard registers nodes given by its ROOT. */
771 static void
772 finish_allocno_hard_regs_nodes_tree (allocno_hard_regs_node_t root)
773 {
774 allocno_hard_regs_node_t child, next;
775
776 for (child = root->first; child != NULL; child = next)
777 {
778 next = child->next;
779 finish_allocno_hard_regs_nodes_tree (child);
780 }
781 ira_free (root);
782 }
783
784 /* Finish work with the forest of allocno hard registers nodes. */
785 static void
786 finish_allocno_hard_regs_nodes_forest (void)
787 {
788 allocno_hard_regs_node_t node, next;
789
790 ira_free (allocno_hard_regs_subnodes);
791 for (node = hard_regs_roots; node != NULL; node = next)
792 {
793 next = node->next;
794 finish_allocno_hard_regs_nodes_tree (node);
795 }
796 ira_free (allocno_hard_regs_nodes);
797 ira_free (allocno_hard_regs_subnode_index);
798 finish_allocno_hard_regs ();
799 }
800
/* Set up left conflict sizes and left conflict subnodes sizes of hard
   registers subnodes of allocno A.  Return TRUE if allocno A is
   trivially colorable.  */
static bool
setup_left_conflict_sizes_p (ira_allocno_t a)
{
  int i, k, nobj, start;
  int conflict_size, left_conflict_subnodes_size, node_preorder_num;
  allocno_color_data_t data;
  HARD_REG_SET profitable_hard_regs;
  allocno_hard_regs_subnode_t subnodes;
  allocno_hard_regs_node_t node;
  HARD_REG_SET node_set;

  nobj = ALLOCNO_NUM_OBJECTS (a);
  conflict_size = 0;
  data = ALLOCNO_COLOR_DATA (a);
  subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
  COPY_HARD_REG_SET (profitable_hard_regs, data->profitable_hard_regs);
  node = data->hard_regs_node;
  node_preorder_num = node->preorder_num;
  COPY_HARD_REG_SET (node_set, node->hard_regs->set);
  /* Phase 1: accumulate, per forest node, the number of hard regs
     needed by live conflicting allocnos attributed to that node.  A
     fresh tick marks the nodes whose conflict_size is valid.  */
  node_check_tick++;
  for (k = 0; k < nobj; k++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, k);
      ira_object_t conflict_obj;
      ira_object_conflict_iterator oci;

      FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	{
	  int size;
	  ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
	  allocno_hard_regs_node_t conflict_node, temp_node;
	  HARD_REG_SET conflict_node_set;
	  allocno_color_data_t conflict_data;

	  conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
	  /* Skip conflicts already removed from the graph or sharing
	     no profitable hard regs with A.  */
	  if (! ALLOCNO_COLOR_DATA (conflict_a)->in_graph_p
	      || ! hard_reg_set_intersect_p (profitable_hard_regs,
					     conflict_data
					     ->profitable_hard_regs))
	    continue;
	  conflict_node = conflict_data->hard_regs_node;
	  COPY_HARD_REG_SET (conflict_node_set, conflict_node->hard_regs->set);
	  /* Attribute the conflict to the smaller of the two nodes;
	     by construction of the forest one set contains the
	     other.  */
	  if (hard_reg_set_subset_p (node_set, conflict_node_set))
	    temp_node = node;
	  else
	    {
	      ira_assert (hard_reg_set_subset_p (conflict_node_set, node_set));
	      temp_node = conflict_node;
	    }
	  /* Lazily reset the node's counter on first touch this
	     tick.  */
	  if (temp_node->check != node_check_tick)
	    {
	      temp_node->check = node_check_tick;
	      temp_node->conflict_size = 0;
	    }
	  size = (ira_reg_class_max_nregs
		  [ALLOCNO_CLASS (conflict_a)][ALLOCNO_MODE (conflict_a)]);
	  if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1)
	    /* We will deal with the subwords individually.  */
	    size = 1;
	  temp_node->conflict_size += size;
	}
    }
  /* Phase 2: initialize each subnode record: its direct conflict
     size and its impact capped by A's profitable hard regs.  */
  for (i = 0; i < data->hard_regs_subnodes_num; i++)
    {
      allocno_hard_regs_node_t temp_node;

      temp_node = allocno_hard_regs_nodes[i + node_preorder_num];
      ira_assert (temp_node->preorder_num == i + node_preorder_num);
      subnodes[i].left_conflict_size = (temp_node->check != node_check_tick
					? 0 : temp_node->conflict_size);
      if (hard_reg_set_subset_p (temp_node->hard_regs->set,
				 profitable_hard_regs))
	subnodes[i].max_node_impact = temp_node->hard_regs_num;
      else
	{
	  HARD_REG_SET temp_set;
	  int j, n, hard_regno;
	  enum reg_class aclass;

	  COPY_HARD_REG_SET (temp_set, temp_node->hard_regs->set);
	  AND_HARD_REG_SET (temp_set, profitable_hard_regs);
	  aclass = ALLOCNO_CLASS (a);
	  /* Count how many of A's class hard regs survive the
	     intersection.  */
	  for (n = 0, j = ira_class_hard_regs_num[aclass] - 1; j >= 0; j--)
	    {
	      hard_regno = ira_class_hard_regs[aclass][j];
	      if (TEST_HARD_REG_BIT (temp_set, hard_regno))
		n++;
	    }
	  subnodes[i].max_node_impact = n;
	}
      subnodes[i].left_conflict_subnodes_size = 0;
    }
  /* Phase 3: propagate conflict sizes bottom-up; subnodes of A's
     node occupy consecutive preorder numbers, children after
     parents, so iterating backwards visits children first.  */
  start = node_preorder_num * allocno_hard_regs_nodes_num;
  for (i = data->hard_regs_subnodes_num - 1; i >= 0; i--)
    {
      int size, parent_i;
      allocno_hard_regs_node_t parent;

      size = (subnodes[i].left_conflict_subnodes_size
	      + MIN (subnodes[i].max_node_impact
		     - subnodes[i].left_conflict_subnodes_size,
		     subnodes[i].left_conflict_size));
      parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
      if (parent == NULL)
	continue;
      parent_i
	= allocno_hard_regs_subnode_index[start + parent->preorder_num];
      if (parent_i < 0)
	continue;
      subnodes[parent_i].left_conflict_subnodes_size += size;
    }
  /* A is trivially colorable if the hard regs needed by its
     conflicts plus those A itself needs fit into A's available
     regs.  */
  left_conflict_subnodes_size = subnodes[0].left_conflict_subnodes_size;
  conflict_size
    += (left_conflict_subnodes_size
	+ MIN (subnodes[0].max_node_impact - left_conflict_subnodes_size,
	       subnodes[0].left_conflict_size));
  conflict_size += ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
  data->colorable_p = conflict_size <= data->available_regs_num;
  return data->colorable_p;
}
924
/* Update left conflict sizes of hard registers subnodes of allocno A
   after removing allocno REMOVED_A with SIZE from the conflict graph.
   Return TRUE if A is trivially colorable.

   The update starts at the subnode corresponding to REMOVED_A's hard
   register node and propagates the resulting decrease of the
   conflict size up the subnode tree toward the root (subnode 0),
   stopping as soon as a parent's conflict size is unaffected.  */
static bool
update_left_conflict_sizes_p (ira_allocno_t a,
			      ira_allocno_t removed_a, int size)
{
  int i, conflict_size, before_conflict_size, diff, start;
  int node_preorder_num, parent_i;
  allocno_hard_regs_node_t node, removed_node, parent;
  allocno_hard_regs_subnode_t subnodes;
  allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);

  ira_assert (! data->colorable_p);
  node = data->hard_regs_node;
  node_preorder_num = node->preorder_num;
  removed_node = ALLOCNO_COLOR_DATA (removed_a)->hard_regs_node;
  /* The hard register sets of A's node and REMOVED_A's node must be
     nested one way or the other -- the nodes form a forest.  */
  ira_assert (hard_reg_set_subset_p (removed_node->hard_regs->set,
				     node->hard_regs->set)
	      || hard_reg_set_subset_p (node->hard_regs->set,
					removed_node->hard_regs->set));
  start = node_preorder_num * allocno_hard_regs_nodes_num;
  i = allocno_hard_regs_subnode_index[start + removed_node->preorder_num];
  if (i < 0)
    i = 0;
  subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
  /* Conflict size of subnode I before the removal.  */
  before_conflict_size
    = (subnodes[i].left_conflict_subnodes_size
       + MIN (subnodes[i].max_node_impact
	      - subnodes[i].left_conflict_subnodes_size,
	      subnodes[i].left_conflict_size));
  subnodes[i].left_conflict_size -= size;
  for (;;)
    {
      /* Conflict size of subnode I after the removal; DIFF is how
	 much it shrank, which is what the parent must absorb.  */
      conflict_size
	= (subnodes[i].left_conflict_subnodes_size
	   + MIN (subnodes[i].max_node_impact
		  - subnodes[i].left_conflict_subnodes_size,
		  subnodes[i].left_conflict_size));
      if ((diff = before_conflict_size - conflict_size) == 0)
	break;
      ira_assert (conflict_size < before_conflict_size);
      parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
      if (parent == NULL)
	break;
      parent_i
	= allocno_hard_regs_subnode_index[start + parent->preorder_num];
      if (parent_i < 0)
	break;
      i = parent_i;
      /* Record the parent's size before updating it, then propagate
	 the decrease into its accumulated subnode size.  */
      before_conflict_size
	= (subnodes[i].left_conflict_subnodes_size
	   + MIN (subnodes[i].max_node_impact
		  - subnodes[i].left_conflict_subnodes_size,
		  subnodes[i].left_conflict_size));
      subnodes[i].left_conflict_subnodes_size -= diff;
    }
  /* A becomes trivially colorable only if the propagation reached the
     root subnode and the root's conflict size plus A's own register
     need fits in the available registers.  */
  if (i != 0
      || (conflict_size
	  + ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
	  > data->available_regs_num))
    return false;
  data->colorable_p = true;
  return true;
}
990
991 /* Return true if allocno A has empty profitable hard regs. */
992 static bool
993 empty_profitable_hard_regs (ira_allocno_t a)
994 {
995 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
996
997 return hard_reg_set_empty_p (data->profitable_hard_regs);
998 }
999
/* Set up profitable hard registers for each allocno being colored.

   Works in three passes over the allocno bitmaps:
   1. seed each coloring allocno's profitable set from its class,
      minus non-allocatable regs and explicit object conflicts;
   2. remove hard regs already assigned to conflicting allocnos;
   3. remove hard regs whose cost exceeds the memory cost, and lower
      ALLOCNO_UPDATED_CLASS_COST to the cheapest surviving reg.  */
static void
setup_profitable_hard_regs (void)
{
  unsigned int i;
  int j, k, nobj, hard_regno, nregs, class_size;
  ira_allocno_t a;
  bitmap_iterator bi;
  enum reg_class aclass;
  enum machine_mode mode;
  allocno_color_data_t data;

  /* Initial set up from allocno classes and explicitly conflicting
     hard regs.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS)
	continue;
      data = ALLOCNO_COLOR_DATA (a);
      /* With no per-reg costs and memory cheaper than any register,
	 no hard reg is profitable at all.  */
      if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL
	  && ALLOCNO_CLASS_COST (a) > ALLOCNO_MEMORY_COST (a))
	CLEAR_HARD_REG_SET (data->profitable_hard_regs);
      else
	{
	  COPY_HARD_REG_SET (data->profitable_hard_regs,
			     reg_class_contents[aclass]);
	  AND_COMPL_HARD_REG_SET (data->profitable_hard_regs,
				  ira_no_alloc_regs);
	  nobj = ALLOCNO_NUM_OBJECTS (a);
	  for (k = 0; k < nobj; k++)
	    {
	      ira_object_t obj = ALLOCNO_OBJECT (a, k);

	      AND_COMPL_HARD_REG_SET (data->profitable_hard_regs,
				      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
	    }
	}
    }
  /* Exclude hard regs already assigned for conflicting objects.  */
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
	  || ! ALLOCNO_ASSIGNED_P (a)
	  || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
	continue;
      mode = ALLOCNO_MODE (a);
      nregs = hard_regno_nregs[hard_regno][mode];
      nobj = ALLOCNO_NUM_OBJECTS (a);
      for (k = 0; k < nobj; k++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, k);
	  ira_object_t conflict_obj;
	  ira_object_conflict_iterator oci;

	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	    {
	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

	      /* We can process the conflict allocno repeatedly with
		 the same result.  */
	      if (nregs == nobj && nregs > 1)
		{
		  /* Each object covers one word; clear only the word
		     of HARD_REGNO that this sub-object conflicts with.  */
		  int num = OBJECT_SUBWORD (conflict_obj);

		  if (REG_WORDS_BIG_ENDIAN)
		    CLEAR_HARD_REG_BIT
		      (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
		       hard_regno + nobj - num - 1);
		  else
		    CLEAR_HARD_REG_BIT
		      (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
		       hard_regno + num);
		}
	      else
		AND_COMPL_HARD_REG_SET
		  (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
		   ira_reg_mode_hard_regset[hard_regno][mode]);
	    }
	}
    }
  /* Exclude too costly hard regs.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      int min_cost = INT_MAX;
      int *costs;

      a = ira_allocnos[i];
      if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
	  || empty_profitable_hard_regs (a))
	continue;
      data = ALLOCNO_COLOR_DATA (a);
      mode = ALLOCNO_MODE (a);
      /* Prefer updated costs; fall back to the original costs.  */
      if ((costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a)) != NULL
	  || (costs = ALLOCNO_HARD_REG_COSTS (a)) != NULL)
	{
	  class_size = ira_class_hard_regs_num[aclass];
	  for (j = 0; j < class_size; j++)
	    {
	      hard_regno = ira_class_hard_regs[aclass][j];
	      if (! TEST_HARD_REG_BIT (data->profitable_hard_regs,
				       hard_regno))
		continue;
	      /* A register costlier than memory is not profitable.  */
	      if (ALLOCNO_UPDATED_MEMORY_COST (a) < costs[j])
		CLEAR_HARD_REG_BIT (data->profitable_hard_regs,
				    hard_regno);
	      else if (min_cost > costs[j])
		min_cost = costs[j];
	    }
	}
      else if (ALLOCNO_UPDATED_MEMORY_COST (a)
	       < ALLOCNO_UPDATED_CLASS_COST (a))
	CLEAR_HARD_REG_SET (data->profitable_hard_regs);
      if (ALLOCNO_UPDATED_CLASS_COST (a) > min_cost)
	ALLOCNO_UPDATED_CLASS_COST (a) = min_cost;
    }
}
1119
1120 \f
1121
1122 /* This page contains functions used to choose hard registers for
1123 allocnos. */
1124
1125 /* Array whose element value is TRUE if the corresponding hard
1126 register was already allocated for an allocno. */
1127 static bool allocated_hardreg_p[FIRST_PSEUDO_REGISTER];
1128
/* Describes one element in a queue of allocnos whose costs need to be
   updated.  Each allocno in the queue is known to have an allocno
   class.  (The pool of these elements, update_cost_queue_elems, is
   indexed by ALLOCNO_NUM.)  */
struct update_cost_queue_elem
{
  /* This element is in the queue iff CHECK == update_cost_check.
     Bumping update_cost_check empties the queue without touching
     the elements.  */
  int check;

  /* COST_HOP_DIVISOR**N, where N is the length of the shortest path
     connecting this allocno to the one being allocated.  */
  int divisor;

  /* The next allocno in the queue, or null if this is the last element.  */
  ira_allocno_t next;
};
1144
1145 /* The first element in a queue of allocnos whose copy costs need to be
1146 updated. Null if the queue is empty. */
1147 static ira_allocno_t update_cost_queue;
1148
1149 /* The last element in the queue described by update_cost_queue.
1150 Not valid if update_cost_queue is null. */
1151 static struct update_cost_queue_elem *update_cost_queue_tail;
1152
1153 /* A pool of elements in the queue described by update_cost_queue.
1154 Elements are indexed by ALLOCNO_NUM. */
1155 static struct update_cost_queue_elem *update_cost_queue_elems;
1156
1157 /* The current value of update_copy_cost call count. */
1158 static int update_cost_check;
1159
1160 /* Allocate and initialize data necessary for function
1161 update_copy_costs. */
1162 static void
1163 initiate_cost_update (void)
1164 {
1165 size_t size;
1166
1167 size = ira_allocnos_num * sizeof (struct update_cost_queue_elem);
1168 update_cost_queue_elems
1169 = (struct update_cost_queue_elem *) ira_allocate (size);
1170 memset (update_cost_queue_elems, 0, size);
1171 update_cost_check = 0;
1172 }
1173
/* Deallocate data used by function update_copy_costs.  Frees the
   queue-element pool allocated by initiate_cost_update.  */
static void
finish_cost_update (void)
{
  ira_free (update_cost_queue_elems);
}
1180
1181 /* When we traverse allocnos to update hard register costs, the cost
1182 divisor will be multiplied by the following macro value for each
1183 hop from given allocno to directly connected allocnos. */
1184 #define COST_HOP_DIVISOR 4
1185
1186 /* Start a new cost-updating pass. */
1187 static void
1188 start_update_cost (void)
1189 {
1190 update_cost_check++;
1191 update_cost_queue = NULL;
1192 }
1193
1194 /* Add (ALLOCNO, DIVISOR) to the end of update_cost_queue, unless
1195 ALLOCNO is already in the queue, or has NO_REGS class. */
1196 static inline void
1197 queue_update_cost (ira_allocno_t allocno, int divisor)
1198 {
1199 struct update_cost_queue_elem *elem;
1200
1201 elem = &update_cost_queue_elems[ALLOCNO_NUM (allocno)];
1202 if (elem->check != update_cost_check
1203 && ALLOCNO_CLASS (allocno) != NO_REGS)
1204 {
1205 elem->check = update_cost_check;
1206 elem->divisor = divisor;
1207 elem->next = NULL;
1208 if (update_cost_queue == NULL)
1209 update_cost_queue = allocno;
1210 else
1211 update_cost_queue_tail->next = allocno;
1212 update_cost_queue_tail = elem;
1213 }
1214 }
1215
1216 /* Try to remove the first element from update_cost_queue. Return false
1217 if the queue was empty, otherwise make (*ALLOCNO, *DIVISOR) describe
1218 the removed element. */
1219 static inline bool
1220 get_next_update_cost (ira_allocno_t *allocno, int *divisor)
1221 {
1222 struct update_cost_queue_elem *elem;
1223
1224 if (update_cost_queue == NULL)
1225 return false;
1226
1227 *allocno = update_cost_queue;
1228 elem = &update_cost_queue_elems[ALLOCNO_NUM (*allocno)];
1229 *divisor = elem->divisor;
1230 update_cost_queue = elem->next;
1231 return true;
1232 }
1233
/* Update the cost of allocnos to increase chances to remove some
   copies as the result of subsequent assignment.

   ALLOCNO has just been assigned a hard register; propagate a cost
   bonus (or penalty, when DECR_P) for that register to unassigned
   allocnos connected to it by copies, breadth-first through the
   update-cost queue.  Each hop multiplies DIVISOR by
   COST_HOP_DIVISOR, attenuating the effect with distance.  */
static void
update_copy_costs (ira_allocno_t allocno, bool decr_p)
{
  int i, cost, update_cost, hard_regno, divisor;
  enum machine_mode mode;
  enum reg_class rclass, aclass;
  ira_allocno_t another_allocno;
  ira_copy_t cp, next_cp;

  hard_regno = ALLOCNO_HARD_REGNO (allocno);
  ira_assert (hard_regno >= 0);

  aclass = ALLOCNO_CLASS (allocno);
  if (aclass == NO_REGS)
    return;
  i = ira_class_hard_reg_index[aclass][hard_regno];
  ira_assert (i >= 0);
  rclass = REGNO_REG_CLASS (hard_regno);

  start_update_cost ();
  divisor = 1;
  do
    {
      mode = ALLOCNO_MODE (allocno);
      ira_init_register_move_cost_if_necessary (mode);
      /* Walk all copies involving ALLOCNO; the copy records chain
	 differently depending on which side ALLOCNO is on.  */
      for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
	{
	  if (cp->first == allocno)
	    {
	      next_cp = cp->next_first_allocno_copy;
	      another_allocno = cp->second;
	    }
	  else if (cp->second == allocno)
	    {
	      next_cp = cp->next_second_allocno_copy;
	      another_allocno = cp->first;
	    }
	  else
	    gcc_unreachable ();

	  aclass = ALLOCNO_CLASS (another_allocno);
	  /* Skip partners that cannot use HARD_REGNO or that are
	     already assigned.  */
	  if (! TEST_HARD_REG_BIT (reg_class_contents[aclass],
				   hard_regno)
	      || ALLOCNO_ASSIGNED_P (another_allocno))
	    continue;

	  /* Move-cost direction depends on which side of the copy
	     ALLOCNO occupies.  */
	  cost = (cp->second == allocno
		  ? ira_register_move_cost[mode][rclass][aclass]
		  : ira_register_move_cost[mode][aclass][rclass]);
	  if (decr_p)
	    cost = -cost;

	  update_cost = cp->freq * cost / divisor;
	  if (update_cost == 0)
	    continue;

	  /* Make sure the partner has its own updated cost vectors
	     before modifying them.  */
	  ira_allocate_and_set_or_copy_costs
	    (&ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno), aclass,
	     ALLOCNO_UPDATED_CLASS_COST (another_allocno),
	     ALLOCNO_HARD_REG_COSTS (another_allocno));
	  ira_allocate_and_set_or_copy_costs
	    (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
	     aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
	  i = ira_class_hard_reg_index[aclass][hard_regno];
	  if (i < 0)
	    continue;
	  ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno)[i] += update_cost;
	  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno)[i]
	    += update_cost;

	  /* Continue the propagation one hop further out.  */
	  queue_update_cost (another_allocno, divisor * COST_HOP_DIVISOR);
	}
    }
  while (get_next_update_cost (&allocno, &divisor));
}
1311
/* This function updates COSTS (decrease if DECR_P) for hard_registers
   of ACLASS by conflict costs of the unassigned allocnos
   connected by copies with allocnos in update_cost_queue.  This
   update increases chances to remove some copies.

   The traversal is breadth-first from the queued allocnos; each
   contribution is scaled down by the partner's frequency and by the
   hop divisor, and propagation stops after roughly five hops.  */
static void
update_conflict_hard_regno_costs (int *costs, enum reg_class aclass,
				  bool decr_p)
{
  int i, cost, class_size, freq, mult, div, divisor;
  int index, hard_regno;
  int *conflict_costs;
  bool cont_p;
  enum reg_class another_aclass;
  ira_allocno_t allocno, another_allocno;
  ira_copy_t cp, next_cp;

  while (get_next_update_cost (&allocno, &divisor))
    for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
      {
	/* The copy records chain differently depending on which side
	   ALLOCNO is on.  */
	if (cp->first == allocno)
	  {
	    next_cp = cp->next_first_allocno_copy;
	    another_allocno = cp->second;
	  }
	else if (cp->second == allocno)
	  {
	    next_cp = cp->next_second_allocno_copy;
	    another_allocno = cp->first;
	  }
	else
	  gcc_unreachable ();
	another_aclass = ALLOCNO_CLASS (another_allocno);
	/* Only unassigned, unspillable partners with a class that
	   shares hard regs with ACLASS can contribute.  */
	if (! ira_reg_classes_intersect_p[aclass][another_aclass]
	    || ALLOCNO_ASSIGNED_P (another_allocno)
	    || ALLOCNO_COLOR_DATA (another_allocno)->may_be_spilled_p)
	  continue;
	class_size = ira_class_hard_regs_num[another_aclass];
	ira_allocate_and_copy_costs
	  (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
	   another_aclass, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
	conflict_costs
	  = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
	/* With no conflict-cost vector there is nothing to add here,
	   but the partner may still lead to contributing allocnos, so
	   keep propagating.  */
	if (conflict_costs == NULL)
	  cont_p = true;
	else
	  {
	    mult = cp->freq;
	    freq = ALLOCNO_FREQ (another_allocno);
	    if (freq == 0)
	      freq = 1;
	    div = freq * divisor;
	    cont_p = false;
	    for (i = class_size - 1; i >= 0; i--)
	      {
		hard_regno = ira_class_hard_regs[another_aclass][i];
		ira_assert (hard_regno >= 0);
		index = ira_class_hard_reg_index[aclass][hard_regno];
		if (index < 0)
		  continue;
		cost = conflict_costs [i] * mult / div;
		if (cost == 0)
		  continue;
		cont_p = true;
		if (decr_p)
		  cost = -cost;
		costs[index] += cost;
	      }
	  }
	/* Probably 5 hops will be enough.  */
	if (cont_p
	    && divisor <= (COST_HOP_DIVISOR
			   * COST_HOP_DIVISOR
			   * COST_HOP_DIVISOR
			   * COST_HOP_DIVISOR))
	  queue_update_cost (another_allocno, divisor * COST_HOP_DIVISOR);
      }
}
1389
1390 /* Set up conflicting (through CONFLICT_REGS) for each object of
1391 allocno A and the start allocno profitable regs (through
1392 START_PROFITABLE_REGS). Remember that the start profitable regs
1393 exclude hard regs which can not hold value of mode of allocno A.
1394 This covers mostly cases when multi-register value should be
1395 aligned. */
1396 static inline void
1397 get_conflict_and_start_profitable_regs (ira_allocno_t a, bool retry_p,
1398 HARD_REG_SET *conflict_regs,
1399 HARD_REG_SET *start_profitable_regs)
1400 {
1401 int i, nwords;
1402 ira_object_t obj;
1403
1404 nwords = ALLOCNO_NUM_OBJECTS (a);
1405 for (i = 0; i < nwords; i++)
1406 {
1407 obj = ALLOCNO_OBJECT (a, i);
1408 COPY_HARD_REG_SET (conflict_regs[i],
1409 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
1410 }
1411 if (retry_p)
1412 {
1413 COPY_HARD_REG_SET (*start_profitable_regs,
1414 reg_class_contents[ALLOCNO_CLASS (a)]);
1415 AND_COMPL_HARD_REG_SET (*start_profitable_regs,
1416 ira_prohibited_class_mode_regs
1417 [ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
1418 }
1419 else
1420 COPY_HARD_REG_SET (*start_profitable_regs,
1421 ALLOCNO_COLOR_DATA (a)->profitable_hard_regs);
1422 }
1423
1424 /* Return true if HARD_REGNO is ok for assigning to allocno A with
1425 PROFITABLE_REGS and whose objects have CONFLICT_REGS. */
1426 static inline bool
1427 check_hard_reg_p (ira_allocno_t a, int hard_regno,
1428 HARD_REG_SET *conflict_regs, HARD_REG_SET profitable_regs)
1429 {
1430 int j, nwords, nregs;
1431 enum reg_class aclass;
1432 enum machine_mode mode;
1433
1434 aclass = ALLOCNO_CLASS (a);
1435 mode = ALLOCNO_MODE (a);
1436 if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[aclass][mode],
1437 hard_regno))
1438 return false;
1439 /* Checking only profitable hard regs. */
1440 if (! TEST_HARD_REG_BIT (profitable_regs, hard_regno))
1441 return false;
1442 nregs = hard_regno_nregs[hard_regno][mode];
1443 nwords = ALLOCNO_NUM_OBJECTS (a);
1444 for (j = 0; j < nregs; j++)
1445 {
1446 int k;
1447 int set_to_test_start = 0, set_to_test_end = nwords;
1448
1449 if (nregs == nwords)
1450 {
1451 if (REG_WORDS_BIG_ENDIAN)
1452 set_to_test_start = nwords - j - 1;
1453 else
1454 set_to_test_start = j;
1455 set_to_test_end = set_to_test_start + 1;
1456 }
1457 for (k = set_to_test_start; k < set_to_test_end; k++)
1458 if (TEST_HARD_REG_BIT (conflict_regs[k], hard_regno + j))
1459 break;
1460 if (k != set_to_test_end)
1461 break;
1462 }
1463 return j == nregs;
1464 }
1465 #ifndef HONOR_REG_ALLOC_ORDER
1466
1467 /* Return number of registers needed to be saved and restored at
1468 function prologue/epilogue if we allocate HARD_REGNO to hold value
1469 of MODE. */
1470 static int
1471 calculate_saved_nregs (int hard_regno, enum machine_mode mode)
1472 {
1473 int i;
1474 int nregs = 0;
1475
1476 ira_assert (hard_regno >= 0);
1477 for (i = hard_regno_nregs[hard_regno][mode] - 1; i >= 0; i--)
1478 if (!allocated_hardreg_p[hard_regno + i]
1479 && !TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + i)
1480 && !LOCAL_REGNO (hard_regno + i))
1481 nregs++;
1482 return nregs;
1483 }
1484 #endif
1485
1486 /* Choose a hard register for allocno A. If RETRY_P is TRUE, it means
1487 that the function called from function
1488 `ira_reassign_conflict_allocnos' and `allocno_reload_assign'. In
1489 this case some allocno data are not defined or updated and we
1490 should not touch these data. The function returns true if we
1491 managed to assign a hard register to the allocno.
1492
1493 To assign a hard register, first of all we calculate all conflict
1494 hard registers which can come from conflicting allocnos with
1495 already assigned hard registers. After that we find first free
1496 hard register with the minimal cost. During hard register cost
1497 calculation we take conflict hard register costs into account to
1498 give a chance for conflicting allocnos to get a better hard
1499 register in the future.
1500
1501 If the best hard register cost is bigger than cost of memory usage
1502 for the allocno, we don't assign a hard register to given allocno
1503 at all.
1504
1505 If we assign a hard register to the allocno, we update costs of the
1506 hard register for allocnos connected by copies to improve a chance
1507 to coalesce insns represented by the copies when we assign hard
1508 registers to the allocnos connected by the copies. */
1509 static bool
1510 assign_hard_reg (ira_allocno_t a, bool retry_p)
1511 {
1512 HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
1513 int i, j, hard_regno, best_hard_regno, class_size;
1514 int cost, mem_cost, min_cost, full_cost, min_full_cost, nwords, word;
1515 int *a_costs;
1516 enum reg_class aclass;
1517 enum machine_mode mode;
1518 static int costs[FIRST_PSEUDO_REGISTER], full_costs[FIRST_PSEUDO_REGISTER];
1519 #ifndef HONOR_REG_ALLOC_ORDER
1520 int saved_nregs;
1521 enum reg_class rclass;
1522 int add_cost;
1523 #endif
1524 #ifdef STACK_REGS
1525 bool no_stack_reg_p;
1526 #endif
1527
1528 ira_assert (! ALLOCNO_ASSIGNED_P (a));
1529 get_conflict_and_start_profitable_regs (a, retry_p,
1530 conflicting_regs,
1531 &profitable_hard_regs);
1532 aclass = ALLOCNO_CLASS (a);
1533 class_size = ira_class_hard_regs_num[aclass];
1534 best_hard_regno = -1;
1535 memset (full_costs, 0, sizeof (int) * class_size);
1536 mem_cost = 0;
1537 memset (costs, 0, sizeof (int) * class_size);
1538 memset (full_costs, 0, sizeof (int) * class_size);
1539 #ifdef STACK_REGS
1540 no_stack_reg_p = false;
1541 #endif
1542 if (! retry_p)
1543 start_update_cost ();
1544 mem_cost += ALLOCNO_UPDATED_MEMORY_COST (a);
1545
1546 ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
1547 aclass, ALLOCNO_HARD_REG_COSTS (a));
1548 a_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
1549 #ifdef STACK_REGS
1550 no_stack_reg_p = no_stack_reg_p || ALLOCNO_TOTAL_NO_STACK_REG_P (a);
1551 #endif
1552 cost = ALLOCNO_UPDATED_CLASS_COST (a);
1553 for (i = 0; i < class_size; i++)
1554 if (a_costs != NULL)
1555 {
1556 costs[i] += a_costs[i];
1557 full_costs[i] += a_costs[i];
1558 }
1559 else
1560 {
1561 costs[i] += cost;
1562 full_costs[i] += cost;
1563 }
1564 nwords = ALLOCNO_NUM_OBJECTS (a);
1565 curr_allocno_process++;
1566 for (word = 0; word < nwords; word++)
1567 {
1568 ira_object_t conflict_obj;
1569 ira_object_t obj = ALLOCNO_OBJECT (a, word);
1570 ira_object_conflict_iterator oci;
1571
1572 /* Take preferences of conflicting allocnos into account. */
1573 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
1574 {
1575 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
1576 enum reg_class conflict_aclass;
1577
1578 /* Reload can give another class so we need to check all
1579 allocnos. */
1580 if (!retry_p
1581 && (!bitmap_bit_p (consideration_allocno_bitmap,
1582 ALLOCNO_NUM (conflict_a))
1583 || ((!ALLOCNO_ASSIGNED_P (conflict_a)
1584 || ALLOCNO_HARD_REGNO (conflict_a) < 0)
1585 && !(hard_reg_set_intersect_p
1586 (profitable_hard_regs,
1587 ALLOCNO_COLOR_DATA
1588 (conflict_a)->profitable_hard_regs)))))
1589 continue;
1590 conflict_aclass = ALLOCNO_CLASS (conflict_a);
1591 ira_assert (ira_reg_classes_intersect_p
1592 [aclass][conflict_aclass]);
1593 if (ALLOCNO_ASSIGNED_P (conflict_a))
1594 {
1595 hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
1596 if (hard_regno >= 0
1597 && (ira_hard_reg_set_intersection_p
1598 (hard_regno, ALLOCNO_MODE (conflict_a),
1599 reg_class_contents[aclass])))
1600 {
1601 int n_objects = ALLOCNO_NUM_OBJECTS (conflict_a);
1602 int conflict_nregs;
1603
1604 mode = ALLOCNO_MODE (conflict_a);
1605 conflict_nregs = hard_regno_nregs[hard_regno][mode];
1606 if (conflict_nregs == n_objects && conflict_nregs > 1)
1607 {
1608 int num = OBJECT_SUBWORD (conflict_obj);
1609
1610 if (REG_WORDS_BIG_ENDIAN)
1611 SET_HARD_REG_BIT (conflicting_regs[word],
1612 hard_regno + n_objects - num - 1);
1613 else
1614 SET_HARD_REG_BIT (conflicting_regs[word],
1615 hard_regno + num);
1616 }
1617 else
1618 IOR_HARD_REG_SET
1619 (conflicting_regs[word],
1620 ira_reg_mode_hard_regset[hard_regno][mode]);
1621 if (hard_reg_set_subset_p (profitable_hard_regs,
1622 conflicting_regs[word]))
1623 goto fail;
1624 }
1625 }
1626 else if (! retry_p
1627 && ! ALLOCNO_COLOR_DATA (conflict_a)->may_be_spilled_p
1628 /* Don't process the conflict allocno twice. */
1629 && (ALLOCNO_COLOR_DATA (conflict_a)->last_process
1630 != curr_allocno_process))
1631 {
1632 int k, *conflict_costs;
1633
1634 ALLOCNO_COLOR_DATA (conflict_a)->last_process
1635 = curr_allocno_process;
1636 ira_allocate_and_copy_costs
1637 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a),
1638 conflict_aclass,
1639 ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_a));
1640 conflict_costs
1641 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a);
1642 if (conflict_costs != NULL)
1643 for (j = class_size - 1; j >= 0; j--)
1644 {
1645 hard_regno = ira_class_hard_regs[aclass][j];
1646 ira_assert (hard_regno >= 0);
1647 k = ira_class_hard_reg_index[conflict_aclass][hard_regno];
1648 if (k < 0)
1649 continue;
1650 full_costs[j] -= conflict_costs[k];
1651 }
1652 queue_update_cost (conflict_a, COST_HOP_DIVISOR);
1653 }
1654 }
1655 }
1656 if (! retry_p)
1657 /* Take into account preferences of allocnos connected by copies to
1658 the conflict allocnos. */
1659 update_conflict_hard_regno_costs (full_costs, aclass, true);
1660
1661 /* Take preferences of allocnos connected by copies into
1662 account. */
1663 if (! retry_p)
1664 {
1665 start_update_cost ();
1666 queue_update_cost (a, COST_HOP_DIVISOR);
1667 update_conflict_hard_regno_costs (full_costs, aclass, false);
1668 }
1669 min_cost = min_full_cost = INT_MAX;
1670 /* We don't care about giving callee saved registers to allocnos no
1671 living through calls because call clobbered registers are
1672 allocated first (it is usual practice to put them first in
1673 REG_ALLOC_ORDER). */
1674 mode = ALLOCNO_MODE (a);
1675 for (i = 0; i < class_size; i++)
1676 {
1677 hard_regno = ira_class_hard_regs[aclass][i];
1678 #ifdef STACK_REGS
1679 if (no_stack_reg_p
1680 && FIRST_STACK_REG <= hard_regno && hard_regno <= LAST_STACK_REG)
1681 continue;
1682 #endif
1683 if (! check_hard_reg_p (a, hard_regno,
1684 conflicting_regs, profitable_hard_regs))
1685 continue;
1686 cost = costs[i];
1687 full_cost = full_costs[i];
1688 #ifndef HONOR_REG_ALLOC_ORDER
1689 if ((saved_nregs = calculate_saved_nregs (hard_regno, mode)) != 0)
1690 /* We need to save/restore the hard register in
1691 epilogue/prologue. Therefore we increase the cost. */
1692 {
1693 rclass = REGNO_REG_CLASS (hard_regno);
1694 add_cost = ((ira_memory_move_cost[mode][rclass][0]
1695 + ira_memory_move_cost[mode][rclass][1])
1696 * saved_nregs / hard_regno_nregs[hard_regno][mode] - 1);
1697 cost += add_cost;
1698 full_cost += add_cost;
1699 }
1700 #endif
1701 if (min_cost > cost)
1702 min_cost = cost;
1703 if (min_full_cost > full_cost)
1704 {
1705 min_full_cost = full_cost;
1706 best_hard_regno = hard_regno;
1707 ira_assert (hard_regno >= 0);
1708 }
1709 }
1710 if (min_full_cost > mem_cost)
1711 {
1712 if (! retry_p && internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1713 fprintf (ira_dump_file, "(memory is more profitable %d vs %d) ",
1714 mem_cost, min_full_cost);
1715 best_hard_regno = -1;
1716 }
1717 fail:
1718 if (best_hard_regno >= 0)
1719 {
1720 for (i = hard_regno_nregs[best_hard_regno][mode] - 1; i >= 0; i--)
1721 allocated_hardreg_p[best_hard_regno + i] = true;
1722 }
1723 ALLOCNO_HARD_REGNO (a) = best_hard_regno;
1724 ALLOCNO_ASSIGNED_P (a) = true;
1725 if (best_hard_regno >= 0)
1726 update_copy_costs (a, true);
1727 ira_assert (ALLOCNO_CLASS (a) == aclass);
1728 /* We don't need updated costs anymore: */
1729 ira_free_allocno_updated_costs (a);
1730 return best_hard_regno >= 0;
1731 }
1732
1733 \f
1734
1735 /* This page contains the allocator based on the Chaitin-Briggs algorithm. */
1736
1737 /* Bucket of allocnos that can colored currently without spilling. */
1738 static ira_allocno_t colorable_allocno_bucket;
1739
1740 /* Bucket of allocnos that might be not colored currently without
1741 spilling. */
1742 static ira_allocno_t uncolorable_allocno_bucket;
1743
1744 /* The current number of allocnos in the uncolorable_bucket. */
1745 static int uncolorable_allocnos_num;
1746
1747 /* Return the current spill priority of allocno A. The less the
1748 number, the more preferable the allocno for spilling. */
1749 static inline int
1750 allocno_spill_priority (ira_allocno_t a)
1751 {
1752 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
1753
1754 return (data->temp
1755 / (ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
1756 * ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
1757 + 1));
1758 }
1759
1760 /* Add allocno A to bucket *BUCKET_PTR. A should be not in a bucket
1761 before the call. */
1762 static void
1763 add_allocno_to_bucket (ira_allocno_t a, ira_allocno_t *bucket_ptr)
1764 {
1765 ira_allocno_t first_a;
1766 allocno_color_data_t data;
1767
1768 if (bucket_ptr == &uncolorable_allocno_bucket
1769 && ALLOCNO_CLASS (a) != NO_REGS)
1770 {
1771 uncolorable_allocnos_num++;
1772 ira_assert (uncolorable_allocnos_num > 0);
1773 }
1774 first_a = *bucket_ptr;
1775 data = ALLOCNO_COLOR_DATA (a);
1776 data->next_bucket_allocno = first_a;
1777 data->prev_bucket_allocno = NULL;
1778 if (first_a != NULL)
1779 ALLOCNO_COLOR_DATA (first_a)->prev_bucket_allocno = a;
1780 *bucket_ptr = a;
1781 }
1782
1783 /* Compare two allocnos to define which allocno should be pushed first
1784 into the coloring stack. If the return is a negative number, the
1785 allocno given by the first parameter will be pushed first. In this
1786 case such allocno has less priority than the second one and the
1787 hard register will be assigned to it after assignment to the second
1788 one. As the result of such assignment order, the second allocno
1789 has a better chance to get the best hard register. */
1790 static int
1791 bucket_allocno_compare_func (const void *v1p, const void *v2p)
1792 {
1793 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
1794 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
1795 int diff, a1_freq, a2_freq, a1_num, a2_num;
1796 int cl1 = ALLOCNO_CLASS (a1), cl2 = ALLOCNO_CLASS (a2);
1797
1798 /* Push pseudos requiring less hard registers first. It means that
1799 we will assign pseudos requiring more hard registers first
1800 avoiding creation small holes in free hard register file into
1801 which the pseudos requiring more hard registers can not fit. */
1802 if ((diff = (ira_reg_class_max_nregs[cl1][ALLOCNO_MODE (a1)]
1803 - ira_reg_class_max_nregs[cl2][ALLOCNO_MODE (a2)])) != 0)
1804 return diff;
1805 a1_freq = ALLOCNO_FREQ (a1);
1806 a2_freq = ALLOCNO_FREQ (a2);
1807 if ((diff = a1_freq - a2_freq) != 0)
1808 return diff;
1809 a1_num = ALLOCNO_COLOR_DATA (a1)->available_regs_num;
1810 a2_num = ALLOCNO_COLOR_DATA (a2)->available_regs_num;
1811 if ((diff = a2_num - a1_num) != 0)
1812 return diff;
1813 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
1814 }
1815
1816 /* Sort bucket *BUCKET_PTR and return the result through
1817 BUCKET_PTR. */
1818 static void
1819 sort_bucket (ira_allocno_t *bucket_ptr,
1820 int (*compare_func) (const void *, const void *))
1821 {
1822 ira_allocno_t a, head;
1823 int n;
1824
1825 for (n = 0, a = *bucket_ptr;
1826 a != NULL;
1827 a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
1828 sorted_allocnos[n++] = a;
1829 if (n <= 1)
1830 return;
1831 qsort (sorted_allocnos, n, sizeof (ira_allocno_t), compare_func);
1832 head = NULL;
1833 for (n--; n >= 0; n--)
1834 {
1835 a = sorted_allocnos[n];
1836 ALLOCNO_COLOR_DATA (a)->next_bucket_allocno = head;
1837 ALLOCNO_COLOR_DATA (a)->prev_bucket_allocno = NULL;
1838 if (head != NULL)
1839 ALLOCNO_COLOR_DATA (head)->prev_bucket_allocno = a;
1840 head = a;
1841 }
1842 *bucket_ptr = head;
1843 }
1844
1845 /* Add ALLOCNO to bucket *BUCKET_PTR maintaining the order according
1846 their priority. ALLOCNO should be not in a bucket before the
1847 call. */
1848 static void
1849 add_allocno_to_ordered_bucket (ira_allocno_t allocno,
1850 ira_allocno_t *bucket_ptr)
1851 {
1852 ira_allocno_t before, after;
1853
1854 if (bucket_ptr == &uncolorable_allocno_bucket
1855 && ALLOCNO_CLASS (allocno) != NO_REGS)
1856 {
1857 uncolorable_allocnos_num++;
1858 ira_assert (uncolorable_allocnos_num > 0);
1859 }
1860 for (before = *bucket_ptr, after = NULL;
1861 before != NULL;
1862 after = before,
1863 before = ALLOCNO_COLOR_DATA (before)->next_bucket_allocno)
1864 if (bucket_allocno_compare_func (&allocno, &before) < 0)
1865 break;
1866 ALLOCNO_COLOR_DATA (allocno)->next_bucket_allocno = before;
1867 ALLOCNO_COLOR_DATA (allocno)->prev_bucket_allocno = after;
1868 if (after == NULL)
1869 *bucket_ptr = allocno;
1870 else
1871 ALLOCNO_COLOR_DATA (after)->next_bucket_allocno = allocno;
1872 if (before != NULL)
1873 ALLOCNO_COLOR_DATA (before)->prev_bucket_allocno = allocno;
1874 }
1875
1876 /* Delete ALLOCNO from bucket *BUCKET_PTR. It should be there before
1877 the call. */
1878 static void
1879 delete_allocno_from_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
1880 {
1881 ira_allocno_t prev_allocno, next_allocno;
1882
1883 if (bucket_ptr == &uncolorable_allocno_bucket
1884 && ALLOCNO_CLASS (allocno) != NO_REGS)
1885 {
1886 uncolorable_allocnos_num--;
1887 ira_assert (uncolorable_allocnos_num >= 0);
1888 }
1889 prev_allocno = ALLOCNO_COLOR_DATA (allocno)->prev_bucket_allocno;
1890 next_allocno = ALLOCNO_COLOR_DATA (allocno)->next_bucket_allocno;
1891 if (prev_allocno != NULL)
1892 ALLOCNO_COLOR_DATA (prev_allocno)->next_bucket_allocno = next_allocno;
1893 else
1894 {
1895 ira_assert (*bucket_ptr == allocno);
1896 *bucket_ptr = next_allocno;
1897 }
1898 if (next_allocno != NULL)
1899 ALLOCNO_COLOR_DATA (next_allocno)->prev_bucket_allocno = prev_allocno;
1900 }
1901
/* Put allocno A onto the coloring stack without removing it from its
   bucket.  Pushing allocno to the coloring stack can result in moving
   conflicting allocnos from the uncolorable bucket to the colorable
   one.  */
static void
push_allocno_to_stack (ira_allocno_t a)
{
  enum reg_class aclass;
  allocno_color_data_t data, conflict_data;
  int size, i, n = ALLOCNO_NUM_OBJECTS (a);

  /* A leaves the conflict graph and is recorded on the stack.  */
  data = ALLOCNO_COLOR_DATA (a);
  data->in_graph_p = false;
  VEC_safe_push (ira_allocno_t, heap, allocno_stack_vec, a);
  aclass = ALLOCNO_CLASS (a);
  if (aclass == NO_REGS)
    return;
  size = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
  if (n > 1)
    {
      /* We will deal with the subwords individually.  */
      gcc_assert (size == ALLOCNO_NUM_OBJECTS (a));
      size = 1;
    }
  /* Removing A may make some conflicting allocnos trivially
     colorable: update the left conflict size of each one.  */
  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);
      ira_object_t conflict_obj;
      ira_object_conflict_iterator oci;

      FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	{
	  ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

	  conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
	  /* Skip conflicts which are already colorable, no longer in
	     the graph, already assigned, or whose profitable hard
	     registers do not overlap A's (removing A frees nothing
	     for them).  */
	  if (conflict_data->colorable_p
	      || ! conflict_data->in_graph_p
	      || ALLOCNO_ASSIGNED_P (conflict_a)
	      || !(hard_reg_set_intersect_p
		   (ALLOCNO_COLOR_DATA (a)->profitable_hard_regs,
		    conflict_data->profitable_hard_regs)))
	    continue;
	  ira_assert (bitmap_bit_p (coloring_allocno_bitmap,
				    ALLOCNO_NUM (conflict_a)));
	  /* If the conflict just became trivially colorable, promote
	     it from the uncolorable bucket to the colorable one.  */
	  if (update_left_conflict_sizes_p (conflict_a, a, size))
	    {
	      delete_allocno_from_bucket
		(conflict_a, &uncolorable_allocno_bucket);
	      add_allocno_to_ordered_bucket
		(conflict_a, &colorable_allocno_bucket);
	      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
		{
		  fprintf (ira_dump_file, " Making");
		  ira_print_expanded_allocno (conflict_a);
		  fprintf (ira_dump_file, " colorable\n");
		}
	    }

	}
    }
}
1963
/* Put ALLOCNO onto the coloring stack and remove it from its bucket.
   The allocno is in the colorable bucket if COLORABLE_P is TRUE.  */
static void
remove_allocno_from_bucket_and_push (ira_allocno_t allocno, bool colorable_p)
{
  if (colorable_p)
    delete_allocno_from_bucket (allocno, &colorable_allocno_bucket);
  else
    delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, " Pushing");
      ira_print_expanded_allocno (allocno);
      if (colorable_p)
	fprintf (ira_dump_file, "(cost %d)\n",
		 ALLOCNO_COLOR_DATA (allocno)->temp);
      else
	fprintf (ira_dump_file, "(potential spill: %spri=%d, cost=%d)\n",
		 ALLOCNO_BAD_SPILL_P (allocno) ? "bad spill, " : "",
		 allocno_spill_priority (allocno),
		 ALLOCNO_COLOR_DATA (allocno)->temp);
    }
  /* An allocno pushed from the uncolorable bucket is not guaranteed a
     hard register when popped.  */
  if (! colorable_p)
    ALLOCNO_COLOR_DATA (allocno)->may_be_spilled_p = true;
  push_allocno_to_stack (allocno);
}
1990
1991 /* Put all allocnos from colorable bucket onto the coloring stack. */
1992 static void
1993 push_only_colorable (void)
1994 {
1995 sort_bucket (&colorable_allocno_bucket, bucket_allocno_compare_func);
1996 for (;colorable_allocno_bucket != NULL;)
1997 remove_allocno_from_bucket_and_push (colorable_allocno_bucket, true);
1998 }
1999
/* Return the frequency of exit edges (if EXIT_P) or entry from/to the
   loop given by its LOOP_NODE.  If REGNO is non-negative, only edges
   on which pseudo REGNO is live (per DF live-range info) are
   counted.  */
int
ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
{
  int freq, i;
  edge_iterator ei;
  edge e;
  VEC (edge, heap) *edges;

  ira_assert (current_loops != NULL && loop_node->loop != NULL
	      && (regno < 0 || regno >= FIRST_PSEUDO_REGISTER));
  freq = 0;
  if (! exit_p)
    {
      /* Entry edges: predecessors of the loop header, excluding the
	 latch (its edge is a back edge, not an entry).  */
      FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
	if (e->src != loop_node->loop->latch
	    && (regno < 0
		|| (bitmap_bit_p (DF_LR_OUT (e->src), regno)
		    && bitmap_bit_p (DF_LR_IN (e->dest), regno))))
	  freq += EDGE_FREQUENCY (e);
    }
  else
    {
      edges = get_loop_exit_edges (loop_node->loop);
      FOR_EACH_VEC_ELT (edge, edges, i, e)
	if (regno < 0
	    || (bitmap_bit_p (DF_LR_OUT (e->src), regno)
		&& bitmap_bit_p (DF_LR_IN (e->dest), regno)))
	  freq += EDGE_FREQUENCY (e);
      VEC_free (edge, heap, edges);
    }

  return REG_FREQ_FROM_EDGE_FREQ (freq);
}
2035
/* Calculate and return the cost of putting allocno A into memory.  */
static int
calculate_allocno_spill_cost (ira_allocno_t a)
{
  int regno, cost;
  enum machine_mode mode;
  enum reg_class rclass;
  ira_allocno_t parent_allocno;
  ira_loop_tree_node_t parent_node, loop_node;

  regno = ALLOCNO_REGNO (a);
  /* Base cost: memory cost minus the cost of keeping A in a register
     of its class.  */
  cost = ALLOCNO_UPDATED_MEMORY_COST (a) - ALLOCNO_UPDATED_CLASS_COST (a);
  /* Caps and allocnos with no representative in the parent loop need
     no border-edge adjustment.  */
  if (ALLOCNO_CAP (a) != NULL)
    return cost;
  loop_node = ALLOCNO_LOOP_TREE_NODE (a);
  if ((parent_node = loop_node->parent) == NULL)
    return cost;
  if ((parent_allocno = parent_node->regno_allocno_map[regno]) == NULL)
    return cost;
  mode = ALLOCNO_MODE (a);
  rclass = ALLOCNO_CLASS (a);
  /* Adjust COST for the moves needed on the loop border edges,
     depending on whether the parent-loop representative of the same
     pseudo got a hard register.  */
  if (ALLOCNO_HARD_REGNO (parent_allocno) < 0)
    /* Parent lives in memory: spilling A removes the memory moves
       weighted by the loop entry/exit frequencies.  */
    cost -= (ira_memory_move_cost[mode][rclass][0]
	     * ira_loop_edge_freq (loop_node, regno, true)
	     + ira_memory_move_cost[mode][rclass][1]
	     * ira_loop_edge_freq (loop_node, regno, false));
  else
    {
      /* Parent lives in a hard register: spilling A replaces
	 register-register moves at the borders with memory moves.  */
      ira_init_register_move_cost_if_necessary (mode);
      cost += ((ira_memory_move_cost[mode][rclass][1]
		* ira_loop_edge_freq (loop_node, regno, true)
		+ ira_memory_move_cost[mode][rclass][0]
		* ira_loop_edge_freq (loop_node, regno, false))
	       - (ira_register_move_cost[mode][rclass][rclass]
		  * (ira_loop_edge_freq (loop_node, regno, false)
		     + ira_loop_edge_freq (loop_node, regno, true))));
    }
  return cost;
}
2075
2076 /* Used for sorting allocnos for spilling. */
2077 static inline int
2078 allocno_spill_priority_compare (ira_allocno_t a1, ira_allocno_t a2)
2079 {
2080 int pri1, pri2, diff;
2081
2082 if (ALLOCNO_BAD_SPILL_P (a1) && ! ALLOCNO_BAD_SPILL_P (a2))
2083 return 1;
2084 if (ALLOCNO_BAD_SPILL_P (a2) && ! ALLOCNO_BAD_SPILL_P (a1))
2085 return -1;
2086 pri1 = allocno_spill_priority (a1);
2087 pri2 = allocno_spill_priority (a2);
2088 if ((diff = pri1 - pri2) != 0)
2089 return diff;
2090 if ((diff
2091 = ALLOCNO_COLOR_DATA (a1)->temp - ALLOCNO_COLOR_DATA (a2)->temp) != 0)
2092 return diff;
2093 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2094 }
2095
2096 /* Used for sorting allocnos for spilling. */
2097 static int
2098 allocno_spill_sort_compare (const void *v1p, const void *v2p)
2099 {
2100 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2101 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
2102
2103 return allocno_spill_priority_compare (p1, p2);
2104 }
2105
/* Push allocnos to the coloring stack.  The order of allocnos in the
   stack defines the order for the subsequent coloring.  */
static void
push_allocnos_to_stack (void)
{
  ira_allocno_t a;
  int cost;

  /* Calculate uncolorable allocno spill costs.  */
  for (a = uncolorable_allocno_bucket;
       a != NULL;
       a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
    if (ALLOCNO_CLASS (a) != NO_REGS)
      {
	cost = calculate_allocno_spill_cost (a);
	/* ??? Remove cost of copies between the coalesced
	   allocnos.  */
	ALLOCNO_COLOR_DATA (a)->temp = cost;
      }
  sort_bucket (&uncolorable_allocno_bucket, allocno_spill_sort_compare);
  /* Alternate draining the colorable bucket and pushing one
     potential-spill allocno; each push can make more allocnos
     colorable, so loop until both buckets are empty.  */
  for (;;)
    {
      push_only_colorable ();
      a = uncolorable_allocno_bucket;
      if (a == NULL)
	break;
      remove_allocno_from_bucket_and_push (a, false);
    }
  ira_assert (colorable_allocno_bucket == NULL
	      && uncolorable_allocno_bucket == NULL);
  ira_assert (uncolorable_allocnos_num == 0);
}
2138
/* Pop the coloring stack and assign hard registers to the popped
   allocnos.  */
static void
pop_allocnos_from_stack (void)
{
  ira_allocno_t allocno;
  enum reg_class aclass;

  for (;VEC_length (ira_allocno_t, allocno_stack_vec) != 0;)
    {
      allocno = VEC_pop (ira_allocno_t, allocno_stack_vec);
      aclass = ALLOCNO_CLASS (allocno);
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file, " Popping");
	  ira_print_expanded_allocno (allocno);
	  fprintf (ira_dump_file, " -- ");
	}
      /* An allocno of class NO_REGS can only live in memory.  */
      if (aclass == NO_REGS)
	{
	  ALLOCNO_HARD_REGNO (allocno) = -1;
	  ALLOCNO_ASSIGNED_P (allocno) = true;
	  ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (allocno) == NULL);
	  ira_assert
	    (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno) == NULL);
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "assign memory\n");
	}
      else if (assign_hard_reg (allocno, false))
	{
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "assign reg %d\n",
		     ALLOCNO_HARD_REGNO (allocno));
	}
      else if (ALLOCNO_ASSIGNED_P (allocno))
	{
	  /* assign_hard_reg failed but marked the allocno assigned:
	     it is spilled.  */
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "spill\n");
	}
      /* The popped allocno conceptually rejoins the conflict
	 graph.  */
      ALLOCNO_COLOR_DATA (allocno)->in_graph_p = true;
    }
}
2181
/* Set up number of available hard registers for allocno A, i.e. the
   number of the class's hard registers in A's profitable set.  */
static void
setup_allocno_available_regs_num (ira_allocno_t a)
{
  int i, n, hard_regno, hard_regs_num, nwords;
  enum reg_class aclass;
  allocno_color_data_t data;

  aclass = ALLOCNO_CLASS (a);
  data = ALLOCNO_COLOR_DATA (a);
  data->available_regs_num = 0;
  if (aclass == NO_REGS)
    return;
  hard_regs_num = ira_class_hard_regs_num[aclass];
  nwords = ALLOCNO_NUM_OBJECTS (a);
  for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
    {
      hard_regno = ira_class_hard_regs[aclass][i];
      /* Checking only profitable hard regs.  */
      if (TEST_HARD_REG_BIT (data->profitable_hard_regs, hard_regno))
	n++;
    }
  data->available_regs_num = n;
  if (internal_flag_ira_verbose <= 2 || ira_dump_file == NULL)
    return;
  /* Everything below is dump output only.  */
  fprintf
    (ira_dump_file,
     " Allocno a%dr%d of %s(%d) has %d avail. regs ",
     ALLOCNO_NUM (a), ALLOCNO_REGNO (a),
     reg_class_names[aclass], ira_class_hard_regs_num[aclass], n);
  print_hard_reg_set (ira_dump_file, data->profitable_hard_regs, false);
  fprintf (ira_dump_file, ", %snode: ",
	   hard_reg_set_equal_p (data->profitable_hard_regs,
				 data->hard_regs_node->hard_regs->set)
	   ? "" : "^");
  print_hard_reg_set (ira_dump_file,
		      data->hard_regs_node->hard_regs->set, false);
  for (i = 0; i < nwords; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);

      if (nwords != 1)
	{
	  if (i != 0)
	    fprintf (ira_dump_file, ", ");
	  fprintf (ira_dump_file, " obj %d", i);
	}
      fprintf (ira_dump_file, " (confl regs = ");
      print_hard_reg_set (ira_dump_file, OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			  false);
      fprintf (ira_dump_file, ")");
    }
  fprintf (ira_dump_file, "\n");
}
2236
2237 /* Put ALLOCNO in a bucket corresponding to its number and size of its
2238 conflicting allocnos and hard registers. */
2239 static void
2240 put_allocno_into_bucket (ira_allocno_t allocno)
2241 {
2242 ALLOCNO_COLOR_DATA (allocno)->in_graph_p = true;
2243 setup_allocno_available_regs_num (allocno);
2244 if (setup_left_conflict_sizes_p (allocno))
2245 add_allocno_to_bucket (allocno, &colorable_allocno_bucket);
2246 else
2247 add_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
2248 }
2249
/* Map: allocno number -> allocno priority.  */
static int *allocno_priorities;

/* Set up priorities for N allocnos in array
   CONSIDERATION_ALLOCNOS.  */
static void
setup_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
{
  int i, length, nrefs, priority, max_priority, mult;
  ira_allocno_t a;

  max_priority = 0;
  /* First pass: raw priority = (log2 of reference count + 1)
     * (memory cost - class cost) * register footprint.  */
  for (i = 0; i < n; i++)
    {
      a = consideration_allocnos[i];
      nrefs = ALLOCNO_NREFS (a);
      ira_assert (nrefs >= 0);
      mult = floor_log2 (ALLOCNO_NREFS (a)) + 1;
      ira_assert (mult >= 0);
      allocno_priorities[ALLOCNO_NUM (a)]
	= priority
	= (mult
	   * (ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a))
	   * ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
      /* Track the largest magnitude for the rescaling pass below.  */
      if (priority < 0)
	priority = -priority;
      if (max_priority < priority)
	max_priority = priority;
    }
  /* Second pass: rescale toward the full int range and divide by the
     per-object excess-pressure length so that longer-lived allocnos
     get lower priority.  */
  mult = max_priority == 0 ? 1 : INT_MAX / max_priority;
  for (i = 0; i < n; i++)
    {
      a = consideration_allocnos[i];
      length = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
      if (ALLOCNO_NUM_OBJECTS (a) > 1)
	length /= ALLOCNO_NUM_OBJECTS (a);
      if (length <= 0)
	length = 1;
      allocno_priorities[ALLOCNO_NUM (a)]
	= allocno_priorities[ALLOCNO_NUM (a)] * mult / length;
    }
}
2292
2293 /* Sort allocnos according to the profit of usage of a hard register
2294 instead of memory for them. */
2295 static int
2296 allocno_cost_compare_func (const void *v1p, const void *v2p)
2297 {
2298 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2299 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
2300 int c1, c2;
2301
2302 c1 = ALLOCNO_UPDATED_MEMORY_COST (p1) - ALLOCNO_UPDATED_CLASS_COST (p1);
2303 c2 = ALLOCNO_UPDATED_MEMORY_COST (p2) - ALLOCNO_UPDATED_CLASS_COST (p2);
2304 if (c1 - c2)
2305 return c1 - c2;
2306
2307 /* If regs are equally good, sort by allocno numbers, so that the
2308 results of qsort leave nothing to chance. */
2309 return ALLOCNO_NUM (p1) - ALLOCNO_NUM (p2);
2310 }
2311
/* We used Chaitin-Briggs coloring to assign as many pseudos as
   possible to hard registers.  Let us try to improve allocation with
   cost point of view.  This function improves the allocation by
   spilling some allocnos and assigning the freed hard registers to
   other allocnos if it decreases the overall allocation cost.  */
static void
improve_allocation (void)
{
  unsigned int i;
  int j, k, n, hregno, conflict_hregno, base_cost, class_size, word, nwords;
  int check, spill_cost, min_cost, nregs, conflict_nregs, r, best;
  bool try_p;
  enum reg_class aclass;
  enum machine_mode mode;
  int *allocno_costs;
  int costs[FIRST_PSEUDO_REGISTER];
  HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
  ira_allocno_t a;
  bitmap_iterator bi;

  /* Clear counts used to process conflicting allocnos only once for
     each allocno.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    ALLOCNO_COLOR_DATA (ira_allocnos[i])->temp = 0;
  check = n = 0;
  /* Process each allocno and try to assign a hard register to it by
     spilling some its conflicting allocnos.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      ALLOCNO_COLOR_DATA (a)->temp = 0;
      if (empty_profitable_hard_regs (a))
	continue;
      /* CHECK is bumped per candidate; comparing it against the TEMP
	 field marks each conflicting allocno as visited exactly once
	 for this candidate.  */
      check++;
      aclass = ALLOCNO_CLASS (a);
      allocno_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
      if (allocno_costs == NULL)
	allocno_costs = ALLOCNO_HARD_REG_COSTS (a);
      /* BASE_COST is what A's current assignment (memory or its
	 current hard register) costs.  */
      if ((hregno = ALLOCNO_HARD_REGNO (a)) < 0)
	base_cost = ALLOCNO_UPDATED_MEMORY_COST (a);
      else if (allocno_costs == NULL)
	/* It means that assigning a hard register is not profitable
	   (we don't waste memory for hard register costs in this
	   case).  */
	continue;
      else
	base_cost = allocno_costs[ira_class_hard_reg_index[aclass][hregno]];
      try_p = false;
      get_conflict_and_start_profitable_regs (a, false,
					      conflicting_regs,
					      &profitable_hard_regs);
      class_size = ira_class_hard_regs_num[aclass];
      /* Set up cost improvement for usage of each profitable hard
	 register for allocno A.  */
      for (j = 0; j < class_size; j++)
	{
	  hregno = ira_class_hard_regs[aclass][j];
	  if (! check_hard_reg_p (a, hregno,
				  conflicting_regs, profitable_hard_regs))
	    continue;
	  ira_assert (ira_class_hard_reg_index[aclass][hregno] == j);
	  k = allocno_costs == NULL ? 0 : j;
	  costs[hregno] = (allocno_costs == NULL
			   ? ALLOCNO_UPDATED_CLASS_COST (a) : allocno_costs[k]);
	  costs[hregno] -= base_cost;
	  /* A negative entry means using HREGNO would be cheaper than
	     A's current assignment.  */
	  if (costs[hregno] < 0)
	    try_p = true;
	}
      if (! try_p)
	/* There is no chance to improve the allocation cost by
	   assigning hard register to allocno A even without spilling
	   conflicting allocnos.  */
	continue;
      mode = ALLOCNO_MODE (a);
      nwords = ALLOCNO_NUM_OBJECTS (a);
      /* Process each allocno conflicting with A and update the cost
	 improvement for profitable hard registers of A.  To use a
	 hard register for A we need to spill some conflicting
	 allocnos and that creates penalty for the cost
	 improvement.  */
      for (word = 0; word < nwords; word++)
	{
	  ira_object_t conflict_obj;
	  ira_object_t obj = ALLOCNO_OBJECT (a, word);
	  ira_object_conflict_iterator oci;

	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	    {
	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

	      if (ALLOCNO_COLOR_DATA (conflict_a)->temp == check)
		/* We already processed this conflicting allocno
		   because we processed earlier another object of the
		   conflicting allocno.  */
		continue;
	      ALLOCNO_COLOR_DATA (conflict_a)->temp = check;
	      if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
		continue;
	      /* Spill penalty for CONFLICT_A: its memory cost minus
		 the benefit of its current hard register.  */
	      spill_cost = ALLOCNO_UPDATED_MEMORY_COST (conflict_a);
	      k = (ira_class_hard_reg_index
		   [ALLOCNO_CLASS (conflict_a)][conflict_hregno]);
	      ira_assert (k >= 0);
	      if ((allocno_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (conflict_a))
		  != NULL)
		spill_cost -= allocno_costs[k];
	      else if ((allocno_costs = ALLOCNO_HARD_REG_COSTS (conflict_a))
		       != NULL)
		spill_cost -= allocno_costs[k];
	      else
		spill_cost -= ALLOCNO_UPDATED_CLASS_COST (conflict_a);
	      conflict_nregs
		= hard_regno_nregs[conflict_hregno][ALLOCNO_MODE (conflict_a)];
	      /* Charge the spill penalty to every candidate hard
		 register of A whose span in MODE would overlap the
		 registers occupied by CONFLICT_A.  */
	      for (r = conflict_hregno;
		   r >= 0 && r + hard_regno_nregs[r][mode] > conflict_hregno;
		   r--)
		if (check_hard_reg_p (a, r,
				      conflicting_regs, profitable_hard_regs))
		  costs[r] += spill_cost;
	      for (r = conflict_hregno + 1;
		   r < conflict_hregno + conflict_nregs;
		   r++)
		if (check_hard_reg_p (a, r,
				      conflicting_regs, profitable_hard_regs))
		  costs[r] += spill_cost;
	    }
	}
      min_cost = INT_MAX;
      best = -1;
      /* Now we choose hard register for A which results in highest
	 allocation cost improvement.  */
      for (j = 0; j < class_size; j++)
	{
	  hregno = ira_class_hard_regs[aclass][j];
	  if (check_hard_reg_p (a, hregno,
				conflicting_regs, profitable_hard_regs)
	      && min_cost > costs[hregno])
	    {
	      best = hregno;
	      min_cost = costs[hregno];
	    }
	}
      if (min_cost >= 0)
	/* We are in a situation when assigning any hard register to A
	   by spilling some conflicting allocnos does not improve the
	   allocation cost.  */
	continue;
      nregs = hard_regno_nregs[best][mode];
      /* Now spill conflicting allocnos which contain a hard register
	 of A when we assign the best chosen hard register to it.  */
      for (word = 0; word < nwords; word++)
	{
	  ira_object_t conflict_obj;
	  ira_object_t obj = ALLOCNO_OBJECT (a, word);
	  ira_object_conflict_iterator oci;

	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	    {
	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

	      if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
		continue;
	      conflict_nregs
		= hard_regno_nregs[conflict_hregno][ALLOCNO_MODE (conflict_a)];
	      if (best + nregs <= conflict_hregno
		  || conflict_hregno + conflict_nregs <= best)
		/* No intersection.  */
		continue;
	      ALLOCNO_HARD_REGNO (conflict_a) = -1;
	      /* Remember the spilled allocno for the retry pass
		 below.  */
	      sorted_allocnos[n++] = conflict_a;
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file, "Spilling a%dr%d for a%dr%d\n",
			 ALLOCNO_NUM (conflict_a), ALLOCNO_REGNO (conflict_a),
			 ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
	    }
	}
      /* Assign the best chosen hard register to A.  */
      ALLOCNO_HARD_REGNO (a) = best;
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	fprintf (ira_dump_file, "Assigning %d to a%dr%d\n",
		 best, ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
    }
  if (n == 0)
    return;
  /* We spilled some allocnos to assign their hard registers to other
     allocnos.  The spilled allocnos are now in array
     'sorted_allocnos'.  There is still a possibility that some of the
     spilled allocnos can get hard registers.  So let us try assign
     them hard registers again (just a reminder -- function
     'assign_hard_reg' assigns hard registers only if it is possible
     and profitable).  We process the spilled allocnos with biggest
     benefit to get hard register first -- see function
     'allocno_cost_compare_func'.  */
  qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
	 allocno_cost_compare_func);
  for (j = 0; j < n; j++)
    {
      a = sorted_allocnos[j];
      ALLOCNO_ASSIGNED_P (a) = false;
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file, " ");
	  ira_print_expanded_allocno (a);
	  fprintf (ira_dump_file, " -- ");
	}
      if (assign_hard_reg (a, false))
	{
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "assign hard reg %d\n",
		     ALLOCNO_HARD_REGNO (a));
	}
      else
	{
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "assign memory\n");
	}
    }
}
2529
2530 /* Sort allocnos according to their priorities which are calculated
2531 analogous to ones in file `global.c'. */
2532 static int
2533 allocno_priority_compare_func (const void *v1p, const void *v2p)
2534 {
2535 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2536 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2537 int pri1, pri2;
2538
2539 pri1 = allocno_priorities[ALLOCNO_NUM (a1)];
2540 pri2 = allocno_priorities[ALLOCNO_NUM (a2)];
2541 if (pri2 != pri1)
2542 return SORTGT (pri2, pri1);
2543
2544 /* If regs are equally good, sort by allocnos, so that the results of
2545 qsort leave nothing to chance. */
2546 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2547 }
2548
/* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
   taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP.
   With -fira-algorithm=priority, priority coloring is used
   instead of the coloring stack.  */
static void
color_allocnos (void)
{
  unsigned int i, n;
  bitmap_iterator bi;
  ira_allocno_t a;

  setup_profitable_hard_regs ();
  if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
    {
      /* Priority coloring: assign registers in a statically computed
	 priority order.  */
      n = 0;
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
	{
	  a = ira_allocnos[i];
	  if (ALLOCNO_CLASS (a) == NO_REGS)
	    {
	      /* No register class: the allocno goes to memory.  */
	      ALLOCNO_HARD_REGNO (a) = -1;
	      ALLOCNO_ASSIGNED_P (a) = true;
	      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
	      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf (ira_dump_file, " Spill");
		  ira_print_expanded_allocno (a);
		  fprintf (ira_dump_file, "\n");
		}
	      continue;
	    }
	  sorted_allocnos[n++] = a;
	}
      if (n != 0)
	{
	  setup_allocno_priorities (sorted_allocnos, n);
	  qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
		 allocno_priority_compare_func);
	  for (i = 0; i < n; i++)
	    {
	      a = sorted_allocnos[i];
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf (ira_dump_file, " ");
		  ira_print_expanded_allocno (a);
		  fprintf (ira_dump_file, " -- ");
		}
	      if (assign_hard_reg (a, false))
		{
		  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		    fprintf (ira_dump_file, "assign hard reg %d\n",
			     ALLOCNO_HARD_REGNO (a));
		}
	      else
		{
		  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		    fprintf (ira_dump_file, "assign memory\n");
		}
	    }
	}
    }
  else
    {
      /* Chaitin-Briggs coloring proper: build the hard-register
	 forest, then push/pop via the coloring stack.  */
      form_allocno_hard_regs_nodes_forest ();
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	print_hard_regs_forest (ira_dump_file);
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
	{
	  a = ira_allocnos[i];
	  if (ALLOCNO_CLASS (a) != NO_REGS && ! empty_profitable_hard_regs (a))
	    ALLOCNO_COLOR_DATA (a)->in_graph_p = true;
	  else
	    {
	      /* No usable hard registers at all: spill right away.  */
	      ALLOCNO_HARD_REGNO (a) = -1;
	      ALLOCNO_ASSIGNED_P (a) = true;
	      /* We don't need updated costs anymore.  */
	      ira_free_allocno_updated_costs (a);
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf (ira_dump_file, " Spill");
		  ira_print_expanded_allocno (a);
		  fprintf (ira_dump_file, "\n");
		}
	    }
	}
      /* Put the allocnos into the corresponding buckets.  */
      colorable_allocno_bucket = NULL;
      uncolorable_allocno_bucket = NULL;
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
	{
	  a = ira_allocnos[i];
	  if (ALLOCNO_COLOR_DATA (a)->in_graph_p)
	    put_allocno_into_bucket (a);
	}
      push_allocnos_to_stack ();
      pop_allocnos_from_stack ();
      finish_allocno_hard_regs_nodes_forest ();
    }
  improve_allocation ();
}
2648
2649 \f
2650
/* Output information about the loop given by its LOOP_TREE_NODE to
   the IRA dump file: loop id and nesting, member basic blocks with
   loop-leaving edges, allocno sets, modified regnos, and register
   pressure per pressure class.  */
static void
print_loop_title (ira_loop_tree_node_t loop_tree_node)
{
  unsigned int j;
  bitmap_iterator bi;
  ira_loop_tree_node_t subloop_node, dest_loop_node;
  edge e;
  edge_iterator ei;

  if (loop_tree_node->parent == NULL)
    /* The root of the loop tree represents the whole function.  */
    fprintf (ira_dump_file,
	     "\n Loop 0 (parent -1, header bb%d, depth 0)\n bbs:",
	     NUM_FIXED_BLOCKS);
  else
    {
      ira_assert (current_loops != NULL && loop_tree_node->loop != NULL);
      fprintf (ira_dump_file,
	       "\n Loop %d (parent %d, header bb%d, depth %d)\n bbs:",
	       loop_tree_node->loop_num, loop_tree_node->parent->loop_num,
	       loop_tree_node->loop->header->index,
	       loop_depth (loop_tree_node->loop));
    }
  /* List the member basic blocks; annotate edges leaving this loop
     with their destination loop.  */
  for (subloop_node = loop_tree_node->children;
       subloop_node != NULL;
       subloop_node = subloop_node->next)
    if (subloop_node->bb != NULL)
      {
	fprintf (ira_dump_file, " %d", subloop_node->bb->index);
	FOR_EACH_EDGE (e, ei, subloop_node->bb->succs)
	  if (e->dest != EXIT_BLOCK_PTR
	      && ((dest_loop_node = IRA_BB_NODE (e->dest)->parent)
		  != loop_tree_node))
	    fprintf (ira_dump_file, "(->%d:l%d)",
		     e->dest->index, dest_loop_node->loop_num);
      }
  fprintf (ira_dump_file, "\n all:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
    fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
  fprintf (ira_dump_file, "\n modified regnos:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->modified_regnos, 0, j, bi)
    fprintf (ira_dump_file, " %d", j);
  fprintf (ira_dump_file, "\n border:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->border_allocnos, 0, j, bi)
    fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
  fprintf (ira_dump_file, "\n Pressure:");
  for (j = 0; (int) j < ira_pressure_classes_num; j++)
    {
      enum reg_class pclass;

      pclass = ira_pressure_classes[j];
      /* Only classes with non-zero pressure are printed.  */
      if (loop_tree_node->reg_pressure[pclass] == 0)
	continue;
      fprintf (ira_dump_file, " %s=%d", reg_class_names[pclass],
	       loop_tree_node->reg_pressure[pclass]);
    }
  fprintf (ira_dump_file, "\n");
}
2709
2710 /* Color the allocnos inside loop (in the extreme case it can be all
2711 of the function) given the corresponding LOOP_TREE_NODE. The
2712 function is called for each loop during top-down traverse of the
2713 loop tree. */
2714 static void
2715 color_pass (ira_loop_tree_node_t loop_tree_node)
2716 {
2717 int regno, hard_regno, index = -1, n;
2718 int cost, exit_freq, enter_freq;
2719 unsigned int j;
2720 bitmap_iterator bi;
2721 enum machine_mode mode;
2722 enum reg_class rclass, aclass, pclass;
2723 ira_allocno_t a, subloop_allocno;
2724 ira_loop_tree_node_t subloop_node;
2725
2726 ira_assert (loop_tree_node->bb == NULL);
2727 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2728 print_loop_title (loop_tree_node);
2729
2730 bitmap_copy (coloring_allocno_bitmap, loop_tree_node->all_allocnos);
2731 bitmap_copy (consideration_allocno_bitmap, coloring_allocno_bitmap);
2732 n = 0;
2733 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
2734 {
2735 a = ira_allocnos[j];
2736 n++;
2737 if (! ALLOCNO_ASSIGNED_P (a))
2738 continue;
2739 bitmap_clear_bit (coloring_allocno_bitmap, ALLOCNO_NUM (a));
2740 }
2741 allocno_color_data
2742 = (allocno_color_data_t) ira_allocate (sizeof (struct allocno_color_data)
2743 * n);
2744 memset (allocno_color_data, 0, sizeof (struct allocno_color_data) * n);
2745 curr_allocno_process = 0;
2746 n = 0;
2747 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
2748 {
2749 a = ira_allocnos[j];
2750 ALLOCNO_ADD_DATA (a) = allocno_color_data + n;
2751 n++;
2752 }
2753 /* Color all mentioned allocnos including transparent ones. */
2754 color_allocnos ();
2755 /* Process caps. They are processed just once. */
2756 if (flag_ira_region == IRA_REGION_MIXED
2757 || flag_ira_region == IRA_REGION_ALL)
2758 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
2759 {
2760 a = ira_allocnos[j];
2761 if (ALLOCNO_CAP_MEMBER (a) == NULL)
2762 continue;
2763 /* Remove from processing in the next loop. */
2764 bitmap_clear_bit (consideration_allocno_bitmap, j);
2765 rclass = ALLOCNO_CLASS (a);
2766 pclass = ira_pressure_class_translate[rclass];
2767 if (flag_ira_region == IRA_REGION_MIXED
2768 && (loop_tree_node->reg_pressure[pclass]
2769 <= ira_class_hard_regs_num[pclass]))
2770 {
2771 mode = ALLOCNO_MODE (a);
2772 hard_regno = ALLOCNO_HARD_REGNO (a);
2773 if (hard_regno >= 0)
2774 {
2775 index = ira_class_hard_reg_index[rclass][hard_regno];
2776 ira_assert (index >= 0);
2777 }
2778 regno = ALLOCNO_REGNO (a);
2779 subloop_allocno = ALLOCNO_CAP_MEMBER (a);
2780 subloop_node = ALLOCNO_LOOP_TREE_NODE (subloop_allocno);
2781 ira_assert (!ALLOCNO_ASSIGNED_P (subloop_allocno));
2782 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
2783 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
2784 if (hard_regno >= 0)
2785 update_copy_costs (subloop_allocno, true);
2786 /* We don't need updated costs anymore: */
2787 ira_free_allocno_updated_costs (subloop_allocno);
2788 }
2789 }
2790 /* Update costs of the corresponding allocnos (not caps) in the
2791 subloops. */
2792 for (subloop_node = loop_tree_node->subloops;
2793 subloop_node != NULL;
2794 subloop_node = subloop_node->subloop_next)
2795 {
2796 ira_assert (subloop_node->bb == NULL);
2797 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
2798 {
2799 a = ira_allocnos[j];
2800 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2801 mode = ALLOCNO_MODE (a);
2802 rclass = ALLOCNO_CLASS (a);
2803 pclass = ira_pressure_class_translate[rclass];
2804 hard_regno = ALLOCNO_HARD_REGNO (a);
2805 /* Use hard register class here. ??? */
2806 if (hard_regno >= 0)
2807 {
2808 index = ira_class_hard_reg_index[rclass][hard_regno];
2809 ira_assert (index >= 0);
2810 }
2811 regno = ALLOCNO_REGNO (a);
2812 /* ??? conflict costs */
2813 subloop_allocno = subloop_node->regno_allocno_map[regno];
2814 if (subloop_allocno == NULL
2815 || ALLOCNO_CAP (subloop_allocno) != NULL)
2816 continue;
2817 ira_assert (ALLOCNO_CLASS (subloop_allocno) == rclass);
2818 ira_assert (bitmap_bit_p (subloop_node->all_allocnos,
2819 ALLOCNO_NUM (subloop_allocno)));
2820 if ((flag_ira_region == IRA_REGION_MIXED)
2821 && (loop_tree_node->reg_pressure[pclass]
2822 <= ira_class_hard_regs_num[pclass]))
2823 {
2824 if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
2825 {
2826 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
2827 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
2828 if (hard_regno >= 0)
2829 update_copy_costs (subloop_allocno, true);
2830 /* We don't need updated costs anymore: */
2831 ira_free_allocno_updated_costs (subloop_allocno);
2832 }
2833 continue;
2834 }
2835 exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
2836 enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
2837 ira_assert (regno < ira_reg_equiv_len);
2838 if (ira_reg_equiv_invariant_p[regno]
2839 || ira_reg_equiv_const[regno] != NULL_RTX)
2840 {
2841 if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
2842 {
2843 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
2844 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
2845 if (hard_regno >= 0)
2846 update_copy_costs (subloop_allocno, true);
2847 /* We don't need updated costs anymore: */
2848 ira_free_allocno_updated_costs (subloop_allocno);
2849 }
2850 }
2851 else if (hard_regno < 0)
2852 {
2853 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
2854 -= ((ira_memory_move_cost[mode][rclass][1] * enter_freq)
2855 + (ira_memory_move_cost[mode][rclass][0] * exit_freq));
2856 }
2857 else
2858 {
2859 aclass = ALLOCNO_CLASS (subloop_allocno);
2860 ira_init_register_move_cost_if_necessary (mode);
2861 cost = (ira_register_move_cost[mode][rclass][rclass]
2862 * (exit_freq + enter_freq));
2863 ira_allocate_and_set_or_copy_costs
2864 (&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno), aclass,
2865 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno),
2866 ALLOCNO_HARD_REG_COSTS (subloop_allocno));
2867 ira_allocate_and_set_or_copy_costs
2868 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno),
2869 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno));
2870 ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index] -= cost;
2871 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno)[index]
2872 -= cost;
2873 if (ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)
2874 > ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index])
2875 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)
2876 = ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index];
2877 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
2878 += (ira_memory_move_cost[mode][rclass][0] * enter_freq
2879 + ira_memory_move_cost[mode][rclass][1] * exit_freq);
2880 }
2881 }
2882 }
2883 ira_free (allocno_color_data);
2884 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)
2885 {
2886 a = ira_allocnos[j];
2887 ALLOCNO_ADD_DATA (a) = NULL;
2888 }
2889 }
2890
2891 /* Initialize the common data for coloring and calls functions to do
2892 Chaitin-Briggs and regional coloring. */
2893 static void
2894 do_coloring (void)
2895 {
2896 coloring_allocno_bitmap = ira_allocate_bitmap ();
2897 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
2898 fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");
2899
2900 ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);
2901
2902 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2903 ira_print_disposition (ira_dump_file);
2904
2905 ira_free_bitmap (coloring_allocno_bitmap);
2906 }
2907
2908 \f
2909
/* Move spill/restore code, which are to be generated in ira-emit.c,
   to less frequent points (if it is profitable) by reassigning some
   allocnos (in loop with subloops containing in another loop) to
   memory which results in longer live-range where the corresponding
   pseudo-registers will be in memory.  */
static void
move_spill_restore (void)
{
  int cost, regno, hard_regno, hard_regno2, index;
  bool changed_p;
  int enter_freq, exit_freq;
  enum machine_mode mode;
  enum reg_class rclass;
  ira_allocno_t a, parent_allocno, subloop_allocno;
  ira_loop_tree_node_t parent, loop_node, subloop_node;
  ira_allocno_iterator ai;

  /* Iterate to a fixed point: spilling one allocno can make spilling
     another one profitable on the next round.  */
  for (;;)
    {
      changed_p = false;
      if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
	fprintf (ira_dump_file, "New iteration of spill/restore move\n");
      FOR_EACH_ALLOCNO (a, ai)
	{
	  regno = ALLOCNO_REGNO (a);
	  loop_node = ALLOCNO_LOOP_TREE_NODE (a);
	  /* Only consider allocnos that got a hard register, live on
	     the region border, and belong to a loop with subloops.  */
	  if (ALLOCNO_CAP_MEMBER (a) != NULL
	      || ALLOCNO_CAP (a) != NULL
	      || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0
	      || loop_node->children == NULL
	      /* don't do the optimization because it can create
		 copies and the reload pass can spill the allocno set
		 by copy although the allocno will not get memory
		 slot.  */
	      || (regno < ira_reg_equiv_len
		  && (ira_reg_equiv_invariant_p[regno]
		      || ira_reg_equiv_const[regno] != NULL_RTX))
	      || !bitmap_bit_p (loop_node->border_allocnos, ALLOCNO_NUM (a)))
	    continue;
	  mode = ALLOCNO_MODE (a);
	  rclass = ALLOCNO_CLASS (a);
	  index = ira_class_hard_reg_index[rclass][hard_regno];
	  ira_assert (index >= 0);
	  /* Benefit of keeping A in its hard register in this loop:
	     memory cost minus the cost of using the register.  */
	  cost = (ALLOCNO_MEMORY_COST (a)
		  - (ALLOCNO_HARD_REG_COSTS (a) == NULL
		     ? ALLOCNO_CLASS_COST (a)
		     : ALLOCNO_HARD_REG_COSTS (a)[index]));
	  ira_init_register_move_cost_if_necessary (mode);
	  for (subloop_node = loop_node->subloops;
	       subloop_node != NULL;
	       subloop_node = subloop_node->subloop_next)
	    {
	      ira_assert (subloop_node->bb == NULL);
	      subloop_allocno = subloop_node->regno_allocno_map[regno];
	      if (subloop_allocno == NULL)
		continue;
	      ira_assert (rclass == ALLOCNO_CLASS (subloop_allocno));
	      /* We have accumulated cost.  To get the real cost of
		 allocno usage in the loop we should subtract costs of
		 the subloop allocnos.  */
	      cost -= (ALLOCNO_MEMORY_COST (subloop_allocno)
		       - (ALLOCNO_HARD_REG_COSTS (subloop_allocno) == NULL
			  ? ALLOCNO_CLASS_COST (subloop_allocno)
			  : ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index]));
	      exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
	      enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
	      if ((hard_regno2 = ALLOCNO_HARD_REGNO (subloop_allocno)) < 0)
		/* Subloop allocno is in memory: spilling A removes
		   the store/load on the subloop borders.  */
		cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
			 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
	      else
		{
		  /* Subloop allocno has a hard register: spilling A
		     adds memory moves on the borders ...  */
		  cost
		    += (ira_memory_move_cost[mode][rclass][0] * exit_freq
			+ ira_memory_move_cost[mode][rclass][1] * enter_freq);
		  /* ... but removes a register-to-register move when
		     the two hard registers differ.  */
		  if (hard_regno2 != hard_regno)
		    cost -= (ira_register_move_cost[mode][rclass][rclass]
			     * (exit_freq + enter_freq));
		}
	    }
	  /* Account for the border with the parent region in the
	     same way.  */
	  if ((parent = loop_node->parent) != NULL
	      && (parent_allocno = parent->regno_allocno_map[regno]) != NULL)
	    {
	      ira_assert (rclass == ALLOCNO_CLASS (parent_allocno));
	      exit_freq = ira_loop_edge_freq (loop_node, regno, true);
	      enter_freq = ira_loop_edge_freq (loop_node, regno, false);
	      if ((hard_regno2 = ALLOCNO_HARD_REGNO (parent_allocno)) < 0)
		cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
			 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
	      else
		{
		  cost
		    += (ira_memory_move_cost[mode][rclass][1] * exit_freq
			+ ira_memory_move_cost[mode][rclass][0] * enter_freq);
		  if (hard_regno2 != hard_regno)
		    cost -= (ira_register_move_cost[mode][rclass][rclass]
			     * (exit_freq + enter_freq));
		}
	    }
	  /* Negative accumulated cost means spilling A in this
	     region is an overall win.  */
	  if (cost < 0)
	    {
	      ALLOCNO_HARD_REGNO (a) = -1;
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf
		    (ira_dump_file,
		     "      Moving spill/restore for a%dr%d up from loop %d",
		     ALLOCNO_NUM (a), regno, loop_node->loop_num);
		  fprintf (ira_dump_file, " - profit %d\n", -cost);
		}
	      changed_p = true;
	    }
	}
      if (! changed_p)
	break;
    }
}
3026
3027 \f
3028
/* Update current hard reg costs and current conflict hard reg costs
   for allocno A.  It is done by processing its copies containing
   other allocnos already assigned.  */
static void
update_curr_costs (ira_allocno_t a)
{
  int i, hard_regno, cost;
  enum machine_mode mode;
  enum reg_class aclass, rclass;
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  ira_free_allocno_updated_costs (a);
  ira_assert (! ALLOCNO_ASSIGNED_P (a));
  aclass = ALLOCNO_CLASS (a);
  if (aclass == NO_REGS)
    return;
  mode = ALLOCNO_MODE (a);
  ira_init_register_move_cost_if_necessary (mode);
  /* Walk A's copy list; each copy links A as either its first or its
     second allocno, and the next pointer depends on which.  */
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      if (cp->first == a)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	gcc_unreachable ();
      /* Only assigned partners whose class intersects A's class can
	 bias A's costs.  */
      if (! ira_reg_classes_intersect_p[aclass][ALLOCNO_CLASS (another_a)]
	  || ! ALLOCNO_ASSIGNED_P (another_a)
	  || (hard_regno = ALLOCNO_HARD_REGNO (another_a)) < 0)
	continue;
      rclass = REGNO_REG_CLASS (hard_regno);
      i = ira_class_hard_reg_index[aclass][hard_regno];
      if (i < 0)
	continue;
      /* The direction of the copy determines which move cost
	 applies.  */
      cost = (cp->first == a
	      ? ira_register_move_cost[mode][rclass][aclass]
	      : ira_register_move_cost[mode][aclass][rclass]);
      ira_allocate_and_set_or_copy_costs
	(&ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a),
	 ALLOCNO_HARD_REG_COSTS (a));
      ira_allocate_and_set_or_copy_costs
	(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
	 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
      /* Make the partner's hard register cheaper for A, weighted by
	 the copy's execution frequency.  */
      ALLOCNO_UPDATED_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
      ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
    }
}
3083
/* Try to assign hard registers to the unassigned allocnos and
   allocnos conflicting with them or conflicting with allocnos whose
   regno >= START_REGNO.  The function is called after ira_flattening,
   so more allocnos (including ones created in ira-emit.c) will have a
   chance to get a hard register.  We use simple assignment algorithm
   based on priorities.  */
void
ira_reassign_conflict_allocnos (int start_regno)
{
  int i, allocnos_to_color_num;
  ira_allocno_t a;
  enum reg_class aclass;
  bitmap allocnos_to_color;
  ira_allocno_iterator ai;

  allocnos_to_color = ira_allocate_bitmap ();
  allocnos_to_color_num = 0;
  /* Pass 1: collect candidates into sorted_allocnos, using the
     bitmap to avoid listing an allocno twice.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      if (! ALLOCNO_ASSIGNED_P (a)
	  && ! bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (a)))
	{
	  if (ALLOCNO_CLASS (a) != NO_REGS)
	    sorted_allocnos[allocnos_to_color_num++] = a;
	  else
	    {
	      /* No usable register class -- send the allocno to
		 memory immediately.  */
	      ALLOCNO_ASSIGNED_P (a) = true;
	      ALLOCNO_HARD_REGNO (a) = -1;
	      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
	      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
	    }
	  bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (a));
	}
      if (ALLOCNO_REGNO (a) < start_regno
	  || (aclass = ALLOCNO_CLASS (a)) == NO_REGS)
	continue;
      /* Also reconsider everything conflicting with an allocno whose
	 regno is at least START_REGNO.  */
      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  ira_object_t conflict_obj;
	  ira_object_conflict_iterator oci;

	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	    {
	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

	      ira_assert (ira_reg_classes_intersect_p
			  [aclass][ALLOCNO_CLASS (conflict_a)]);
	      /* bitmap_set_bit returns false when the bit was
		 already set, i.e. the allocno is already listed.  */
	      if (!bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (conflict_a)))
		continue;
	      sorted_allocnos[allocnos_to_color_num++] = conflict_a;
	    }
	}
    }
  ira_free_bitmap (allocnos_to_color);
  if (allocnos_to_color_num > 1)
    {
      setup_allocno_priorities (sorted_allocnos, allocnos_to_color_num);
      qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t),
	     allocno_priority_compare_func);
    }
  /* Unassign all candidates first so their mutual conflicts do not
     constrain each other, and refresh their costs.  */
  for (i = 0; i < allocnos_to_color_num; i++)
    {
      a = sorted_allocnos[i];
      ALLOCNO_ASSIGNED_P (a) = false;
      update_curr_costs (a);
    }
  /* Pass 2: greedy assignment in priority order.  */
  for (i = 0; i < allocnos_to_color_num; i++)
    {
      a = sorted_allocnos[i];
      if (assign_hard_reg (a, true))
	{
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf
	      (ira_dump_file,
	       "      Secondary allocation: assign hard reg %d to reg %d\n",
	       ALLOCNO_HARD_REGNO (a), ALLOCNO_REGNO (a));
	}
    }
}
3166
3167 \f
3168
3169 /* This page contains functions used to find conflicts using allocno
3170 live ranges. */
3171
3172 /* Return TRUE if live ranges of allocnos A1 and A2 intersect. It is
3173 used to find a conflict for new allocnos or allocnos with the
3174 different allocno classes. */
3175 static bool
3176 allocnos_conflict_by_live_ranges_p (ira_allocno_t a1, ira_allocno_t a2)
3177 {
3178 rtx reg1, reg2;
3179 int i, j;
3180 int n1 = ALLOCNO_NUM_OBJECTS (a1);
3181 int n2 = ALLOCNO_NUM_OBJECTS (a2);
3182
3183 if (a1 == a2)
3184 return false;
3185 reg1 = regno_reg_rtx[ALLOCNO_REGNO (a1)];
3186 reg2 = regno_reg_rtx[ALLOCNO_REGNO (a2)];
3187 if (reg1 != NULL && reg2 != NULL
3188 && ORIGINAL_REGNO (reg1) == ORIGINAL_REGNO (reg2))
3189 return false;
3190
3191 for (i = 0; i < n1; i++)
3192 {
3193 ira_object_t c1 = ALLOCNO_OBJECT (a1, i);
3194
3195 for (j = 0; j < n2; j++)
3196 {
3197 ira_object_t c2 = ALLOCNO_OBJECT (a2, j);
3198
3199 if (ira_live_ranges_intersect_p (OBJECT_LIVE_RANGES (c1),
3200 OBJECT_LIVE_RANGES (c2)))
3201 return true;
3202 }
3203 }
3204 return false;
3205 }
3206
3207 #ifdef ENABLE_IRA_CHECKING
3208
3209 /* Return TRUE if live ranges of pseudo-registers REGNO1 and REGNO2
3210 intersect. This should be used when there is only one region.
3211 Currently this is used during reload. */
3212 static bool
3213 conflict_by_live_ranges_p (int regno1, int regno2)
3214 {
3215 ira_allocno_t a1, a2;
3216
3217 ira_assert (regno1 >= FIRST_PSEUDO_REGISTER
3218 && regno2 >= FIRST_PSEUDO_REGISTER);
3219 /* Reg info caclulated by dataflow infrastructure can be different
3220 from one calculated by regclass. */
3221 if ((a1 = ira_loop_tree_root->regno_allocno_map[regno1]) == NULL
3222 || (a2 = ira_loop_tree_root->regno_allocno_map[regno2]) == NULL)
3223 return false;
3224 return allocnos_conflict_by_live_ranges_p (a1, a2);
3225 }
3226
3227 #endif
3228
3229 \f
3230
3231 /* This page contains code to coalesce memory stack slots used by
3232 spilled allocnos. This results in smaller stack frame, better data
3233 locality, and in smaller code for some architectures like
3234 x86/x86_64 where insn size depends on address displacement value.
3235 On the other hand, it can worsen insn scheduling after the RA but
3236 in practice it is less important than smaller stack frames. */
3237
/* TRUE if we coalesced some allocnos.  In other words, if we got
   loops formed by members first_coalesced_allocno and
   next_coalesced_allocno containing more than one allocno.  */
static bool allocno_coalesced_p;

/* Bitmap used to prevent a repeated allocno processing because of
   coalescing.  */
static bitmap processed_coalesced_allocno_bitmap;

/* See below.  */
typedef struct coalesce_data *coalesce_data_t;

/* To decrease footprint of ira_allocno structure we store all data
   needed only for coalescing in the following structure.  */
struct coalesce_data
{
  /* Coalesced allocnos form a cyclic list.  One allocno given by
     FIRST represents all coalesced allocnos.  The
     list is chained by NEXT.  */
  ira_allocno_t first;
  ira_allocno_t next;
  /* Scratch field; used e.g. to hold the stack-slot number during
     spill slot coalescing.  */
  int temp;
};

/* Container for storing allocno data concerning coalescing.  */
static coalesce_data_t allocno_coalesce_data;

/* Macro to access the data concerning coalescing.  */
#define ALLOCNO_COALESCE_DATA(a) ((coalesce_data_t) ALLOCNO_ADD_DATA (a))
3267
3268 /* The function is used to sort allocnos according to their execution
3269 frequencies. */
3270 static int
3271 copy_freq_compare_func (const void *v1p, const void *v2p)
3272 {
3273 ira_copy_t cp1 = *(const ira_copy_t *) v1p, cp2 = *(const ira_copy_t *) v2p;
3274 int pri1, pri2;
3275
3276 pri1 = cp1->freq;
3277 pri2 = cp2->freq;
3278 if (pri2 - pri1)
3279 return pri2 - pri1;
3280
3281 /* If freqencies are equal, sort by copies, so that the results of
3282 qsort leave nothing to chance. */
3283 return cp1->num - cp2->num;
3284 }
3285
/* Merge two sets of coalesced allocnos given correspondingly by
   allocnos A1 and A2 (more accurately merging A2 set into A1
   set).  */
static void
merge_allocnos (ira_allocno_t a1, ira_allocno_t a2)
{
  ira_allocno_t a, first, last, next;

  first = ALLOCNO_COALESCE_DATA (a1)->first;
  a = ALLOCNO_COALESCE_DATA (a2)->first;
  /* Already in the same set -- nothing to do.  */
  if (first == a)
    return;
  /* Retarget every member of A2's set to the new representative
     FIRST; LAST ends up as the member immediately preceding A2 on
     the cyclic list.  */
  for (last = a2, a = ALLOCNO_COALESCE_DATA (a2)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      ALLOCNO_COALESCE_DATA (a)->first = first;
      if (a == a2)
	break;
      last = a;
    }
  /* Splice A2's cyclic list into FIRST's cyclic list right after
     FIRST.  */
  next = allocno_coalesce_data[ALLOCNO_NUM (first)].next;
  allocno_coalesce_data[ALLOCNO_NUM (first)].next = a2;
  allocno_coalesce_data[ALLOCNO_NUM (last)].next = next;
}
3310
/* Return TRUE if there are conflicting allocnos from two sets of
   coalesced allocnos given correspondingly by allocnos A1 and A2.  We
   use live ranges to find conflicts because conflicts are represented
   only for allocnos of the same allocno class and during the reload
   pass we coalesce allocnos for sharing stack memory slots.  */
static bool
coalesced_allocno_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
{
  ira_allocno_t a, conflict_a;

  if (allocno_coalesced_p)
    {
      /* Mark all members of A1's set.  NOTE(review): the bitmap is
	 populated here but not consulted anywhere in this function;
	 it looks like a leftover from an earlier version -- confirm
	 before relying on it.  */
      bitmap_clear (processed_coalesced_allocno_bitmap);
      for (a = ALLOCNO_COALESCE_DATA (a1)->next;;
	   a = ALLOCNO_COALESCE_DATA (a)->next)
	{
	  bitmap_set_bit (processed_coalesced_allocno_bitmap, ALLOCNO_NUM (a));
	  if (a == a1)
	    break;
	}
    }
  /* Compare every member of A2's cyclic list against every member of
     A1's cyclic list.  */
  for (a = ALLOCNO_COALESCE_DATA (a2)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      for (conflict_a = ALLOCNO_COALESCE_DATA (a1)->next;;
	   conflict_a = ALLOCNO_COALESCE_DATA (conflict_a)->next)
	{
	  if (allocnos_conflict_by_live_ranges_p (a, conflict_a))
	    return true;
	  if (conflict_a == a1)
	    break;
	}
      if (a == a2)
	break;
    }
  return false;
}
3348
/* The major function for aggressive allocno coalescing.  We coalesce
   only spilled allocnos.  If some allocnos have been coalesced, we
   set up flag allocno_coalesced_p.  */
static void
coalesce_allocnos (void)
{
  ira_allocno_t a;
  ira_copy_t cp, next_cp, *sorted_copies;
  unsigned int j;
  int i, n, cp_num, regno;
  bitmap_iterator bi;

  sorted_copies = (ira_copy_t *) ira_allocate (ira_copies_num
					       * sizeof (ira_copy_t));
  cp_num = 0;
  /* Collect copies.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      regno = ALLOCNO_REGNO (a);
      /* Only spilled allocnos without a register equivalence are
	 coalescing candidates.  */
      if (! ALLOCNO_ASSIGNED_P (a) || ALLOCNO_HARD_REGNO (a) >= 0
	  || (regno < ira_reg_equiv_len
	      && (ira_reg_equiv_const[regno] != NULL_RTX
		  || ira_reg_equiv_invariant_p[regno])))
	continue;
      for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
	{
	  /* Each copy is visited from its FIRST allocno only, so it
	     is collected at most once.  */
	  if (cp->first == a)
	    {
	      next_cp = cp->next_first_allocno_copy;
	      regno = ALLOCNO_REGNO (cp->second);
	      /* For priority coloring we coalesce allocnos only with
		 the same allocno class not with intersected allocno
		 classes as it were possible.  It is done for
		 simplicity.  */
	      if ((cp->insn != NULL || cp->constraint_p)
		  && ALLOCNO_ASSIGNED_P (cp->second)
		  && ALLOCNO_HARD_REGNO (cp->second) < 0
		  && (regno >= ira_reg_equiv_len
		      || (! ira_reg_equiv_invariant_p[regno]
			  && ira_reg_equiv_const[regno] == NULL_RTX)))
		sorted_copies[cp_num++] = cp;
	    }
	  else if (cp->second == a)
	    next_cp = cp->next_second_allocno_copy;
	  else
	    gcc_unreachable ();
	}
    }
  qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
  /* Coalesced copies, most frequently executed first.  */
  for (; cp_num != 0;)
    {
      /* Merge the first copy whose endpoints do not conflict.  */
      for (i = 0; i < cp_num; i++)
	{
	  cp = sorted_copies[i];
	  if (! coalesced_allocno_conflict_p (cp->first, cp->second))
	    {
	      allocno_coalesced_p = true;
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		fprintf
		  (ira_dump_file,
		   "      Coalescing copy %d:a%dr%d-a%dr%d (freq=%d)\n",
		   cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
		   ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
		   cp->freq);
	      merge_allocnos (cp->first, cp->second);
	      i++;
	      break;
	    }
	}
      /* Collect the rest of copies.  Copies whose endpoints ended up
	 in the same set are dropped.  */
      for (n = 0; i < cp_num; i++)
	{
	  cp = sorted_copies[i];
	  if (allocno_coalesce_data[ALLOCNO_NUM (cp->first)].first
	      != allocno_coalesce_data[ALLOCNO_NUM (cp->second)].first)
	    sorted_copies[n++] = cp;
	}
      cp_num = n;
    }
  ira_free (sorted_copies);
}
3432
/* Usage cost and order number of coalesced allocno set to which
   given pseudo register belongs to.  Both arrays are indexed by
   pseudo regno.  */
static int *regno_coalesced_allocno_cost;
static int *regno_coalesced_allocno_num;
3437
3438 /* Sort pseudos according frequencies of coalesced allocno sets they
3439 belong to (putting most frequently ones first), and according to
3440 coalesced allocno set order numbers. */
3441 static int
3442 coalesced_pseudo_reg_freq_compare (const void *v1p, const void *v2p)
3443 {
3444 const int regno1 = *(const int *) v1p;
3445 const int regno2 = *(const int *) v2p;
3446 int diff;
3447
3448 if ((diff = (regno_coalesced_allocno_cost[regno2]
3449 - regno_coalesced_allocno_cost[regno1])) != 0)
3450 return diff;
3451 if ((diff = (regno_coalesced_allocno_num[regno1]
3452 - regno_coalesced_allocno_num[regno2])) != 0)
3453 return diff;
3454 return regno1 - regno2;
3455 }
3456
/* Widest width in which each pseudo reg is referred to (via subreg).
   It is used for sorting pseudo registers.  */
static unsigned int *regno_max_ref_width;

/* Redefine STACK_GROWS_DOWNWARD in terms of 0 or 1 so it can be used
   in ordinary C expressions below (targets define it as an empty or
   non-empty macro).  */
#ifdef STACK_GROWS_DOWNWARD
# undef STACK_GROWS_DOWNWARD
# define STACK_GROWS_DOWNWARD 1
#else
# define STACK_GROWS_DOWNWARD 0
#endif
3468
/* Sort pseudos according their slot numbers (putting ones with
   smaller numbers first, or last when the frame pointer is not
   needed).  */
static int
coalesced_pseudo_reg_slot_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;
  ira_allocno_t a1 = ira_regno_allocno_map[regno1];
  ira_allocno_t a2 = ira_regno_allocno_map[regno2];
  int diff, slot_num1, slot_num2;
  int total_size1, total_size2;

  /* Pseudos without an allocno or with a hard register sort after
     the spilled ones, ordered among themselves by regno.  */
  if (a1 == NULL || ALLOCNO_HARD_REGNO (a1) >= 0)
    {
      if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
	return regno1 - regno2;
      return 1;
    }
  else if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
    return -1;
  /* For spilled allocnos ALLOCNO_HARD_REGNO holds the negated slot
     number.  */
  slot_num1 = -ALLOCNO_HARD_REGNO (a1);
  slot_num2 = -ALLOCNO_HARD_REGNO (a2);
  if ((diff = slot_num1 - slot_num2) != 0)
    /* NOTE(review): `!' binds tighter than `==' here, so this reads
       as (!FRAME_GROWS_DOWNWARD) == STACK_GROWS_DOWNWARD, i.e. the
       direction is flipped when frame and stack grow the same way --
       presumed intentional; confirm against target conventions.  */
    return (frame_pointer_needed
	    || !FRAME_GROWS_DOWNWARD == STACK_GROWS_DOWNWARD ? diff : -diff);
  /* Same slot: put the pseudo with the larger total size first.  */
  total_size1 = MAX (PSEUDO_REGNO_BYTES (regno1),
		     regno_max_ref_width[regno1]);
  total_size2 = MAX (PSEUDO_REGNO_BYTES (regno2),
		     regno_max_ref_width[regno2]);
  if ((diff = total_size2 - total_size1) != 0)
    return diff;
  return regno1 - regno2;
}
3503
/* Setup REGNO_COALESCED_ALLOCNO_COST and REGNO_COALESCED_ALLOCNO_NUM
   for coalesced allocno sets containing allocnos with their regnos
   given in array PSEUDO_REGNOS of length N.  */
static void
setup_coalesced_allocno_costs_and_nums (int *pseudo_regnos, int n)
{
  int i, num, regno, cost;
  ira_allocno_t allocno, a;

  for (num = i = 0; i < n; i++)
    {
      regno = pseudo_regnos[i];
      allocno = ira_regno_allocno_map[regno];
      if (allocno == NULL)
	{
	  /* Pseudo without an allocno: zero cost, its own set
	     number.  */
	  regno_coalesced_allocno_cost[regno] = 0;
	  regno_coalesced_allocno_num[regno] = ++num;
	  continue;
	}
      /* Process each coalesced set exactly once, through its
	 representative.  */
      if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno)
	continue;
      num++;
      /* The cost of a set is the sum of the frequencies of its
	 members (walk the cyclic list once).  */
      for (cost = 0, a = ALLOCNO_COALESCE_DATA (allocno)->next;;
	   a = ALLOCNO_COALESCE_DATA (a)->next)
	{
	  cost += ALLOCNO_FREQ (a);
	  if (a == allocno)
	    break;
	}
      /* Propagate the set number and cost to every member regno.  */
      for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
	   a = ALLOCNO_COALESCE_DATA (a)->next)
	{
	  regno_coalesced_allocno_num[ALLOCNO_REGNO (a)] = num;
	  regno_coalesced_allocno_cost[ALLOCNO_REGNO (a)] = cost;
	  if (a == allocno)
	    break;
	}
    }
}
3543
3544 /* Collect spilled allocnos representing coalesced allocno sets (the
3545 first coalesced allocno). The collected allocnos are returned
3546 through array SPILLED_COALESCED_ALLOCNOS. The function returns the
3547 number of the collected allocnos. The allocnos are given by their
3548 regnos in array PSEUDO_REGNOS of length N. */
3549 static int
3550 collect_spilled_coalesced_allocnos (int *pseudo_regnos, int n,
3551 ira_allocno_t *spilled_coalesced_allocnos)
3552 {
3553 int i, num, regno;
3554 ira_allocno_t allocno;
3555
3556 for (num = i = 0; i < n; i++)
3557 {
3558 regno = pseudo_regnos[i];
3559 allocno = ira_regno_allocno_map[regno];
3560 if (allocno == NULL || ALLOCNO_HARD_REGNO (allocno) >= 0
3561 || ALLOCNO_COALESCE_DATA (allocno)->first != allocno)
3562 continue;
3563 spilled_coalesced_allocnos[num++] = allocno;
3564 }
3565 return num;
3566 }
3567
/* Array of live ranges of size IRA_ALLOCNOS_NUM.  Live range for
   given slot contains live ranges of coalesced allocnos assigned to
   given slot.  Indexed by the slot numbers stored in the TEMP field
   of struct coalesce_data.  */
static live_range_t *slot_coalesced_allocnos_live_ranges;
3572
/* Return TRUE if coalesced allocnos represented by ALLOCNO has live
   ranges intersected with live ranges of coalesced allocnos assigned
   to slot with number N.  */
static bool
slot_coalesced_allocno_live_ranges_intersect_p (ira_allocno_t allocno, int n)
{
  ira_allocno_t a;

  /* Check every member of ALLOCNO's cyclic coalesce list.  */
  for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      int i;
      int nr = ALLOCNO_NUM_OBJECTS (a);

      for (i = 0; i < nr; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);

	  /* Intersection with the accumulated ranges of slot N means
	     the slot cannot be shared.  */
	  if (ira_live_ranges_intersect_p
	      (slot_coalesced_allocnos_live_ranges[n],
	       OBJECT_LIVE_RANGES (obj)))
	    return true;
	}
      if (a == allocno)
	break;
    }
  return false;
}
3601
/* Update live ranges of slot to which coalesced allocnos represented
   by ALLOCNO were assigned.  */
static void
setup_slot_coalesced_allocno_live_ranges (ira_allocno_t allocno)
{
  int i, n;
  ira_allocno_t a;
  live_range_t r;

  /* The slot number was stored in the TEMP field by
     coalesce_spill_slots.  */
  n = ALLOCNO_COALESCE_DATA (allocno)->temp;
  for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      int nr = ALLOCNO_NUM_OBJECTS (a);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);

	  /* Merge a copy of the object's ranges into the slot's
	     accumulated ranges (the copy keeps the object's own list
	     intact).  */
	  r = ira_copy_live_range_list (OBJECT_LIVE_RANGES (obj));
	  slot_coalesced_allocnos_live_ranges[n]
	    = ira_merge_live_ranges
	      (slot_coalesced_allocnos_live_ranges[n], r);
	}
      if (a == allocno)
	break;
    }
}
3629
/* We have coalesced allocnos involved in copies.  Coalesce allocnos
   further in order to share the same memory stack slot.  Allocnos
   representing sets of allocnos coalesced before the call are given
   in array SPILLED_COALESCED_ALLOCNOS of length NUM.  Return TRUE if
   some allocnos were coalesced in the function.  */
static bool
coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
{
  int i, j, n, last_coalesced_allocno_num;
  ira_allocno_t allocno, a;
  bool merged_p = false;
  bitmap set_jump_crosses = regstat_get_setjmp_crosses ();

  slot_coalesced_allocnos_live_ranges
    = (live_range_t *) ira_allocate (sizeof (live_range_t) * ira_allocnos_num);
  memset (slot_coalesced_allocnos_live_ranges, 0,
	  sizeof (live_range_t) * ira_allocnos_num);
  last_coalesced_allocno_num = 0;
  /* Coalesce non-conflicting spilled allocnos preferring most
     frequently used.  */
  for (i = 0; i < num; i++)
    {
      allocno = spilled_coalesced_allocnos[i];
      /* Skip non-representatives, pseudos crossing setjmp calls
	 (their slots must not be shared), and pseudos with an
	 equivalence.  */
      if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno
	  || bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (allocno))
	  || (ALLOCNO_REGNO (allocno) < ira_reg_equiv_len
	      && (ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX
		  || ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)])))
	continue;
      /* Look among earlier (more frequent) sets for one whose slot's
	 accumulated live ranges do not intersect ALLOCNO's.  */
      for (j = 0; j < i; j++)
	{
	  a = spilled_coalesced_allocnos[j];
	  n = ALLOCNO_COALESCE_DATA (a)->temp;
	  if (ALLOCNO_COALESCE_DATA (a)->first == a
	      && ! bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (a))
	      && (ALLOCNO_REGNO (a) >= ira_reg_equiv_len
		  || (! ira_reg_equiv_invariant_p[ALLOCNO_REGNO (a)]
		      && ira_reg_equiv_const[ALLOCNO_REGNO (a)] == NULL_RTX))
	      && ! slot_coalesced_allocno_live_ranges_intersect_p (allocno, n))
	    break;
	}
      if (j >= i)
	{
	  /* No coalescing: set up number for coalesced allocnos
	     represented by ALLOCNO.  */
	  ALLOCNO_COALESCE_DATA (allocno)->temp = last_coalesced_allocno_num++;
	  setup_slot_coalesced_allocno_live_ranges (allocno);
	}
      else
	{
	  allocno_coalesced_p = true;
	  merged_p = true;
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf (ira_dump_file,
		     "      Coalescing spilled allocnos a%dr%d->a%dr%d\n",
		     ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno),
		     ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
	  /* Put ALLOCNO's set into A's slot and merge the two
	     coalesce sets.  */
	  ALLOCNO_COALESCE_DATA (allocno)->temp
	    = ALLOCNO_COALESCE_DATA (a)->temp;
	  setup_slot_coalesced_allocno_live_ranges (allocno);
	  merge_allocnos (a, allocno);
	  ira_assert (ALLOCNO_COALESCE_DATA (a)->first == a);
	}
    }
  /* Release the per-slot accumulated live ranges.  */
  for (i = 0; i < ira_allocnos_num; i++)
    ira_finish_live_range_list (slot_coalesced_allocnos_live_ranges[i]);
  ira_free (slot_coalesced_allocnos_live_ranges);
  return merged_p;
}
3699
/* Sort pseudo-register numbers in array PSEUDO_REGNOS of length N for
   subsequent assigning stack slots to them in the reload pass.  To do
   this we coalesce spilled allocnos first to decrease the number of
   memory-memory move insns.  This function is called by the
   reload.

   REG_MAX_REF_WIDTH gives, per regno, the widest access width seen;
   it is only read here for dump output and is stashed in
   REGNO_MAX_REF_WIDTH for the final slot-ordered qsort.  On return
   each spilled allocno's hard regno encodes its slot as -slot_num
   (slot numbers start at 1; -1 stays reserved for slots found
   dynamically by reload) and IRA_SPILLED_REG_STACK_SLOTS_NUM holds
   the number of slots assigned here.  */
void
ira_sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
			       unsigned int *reg_max_ref_width)
{
  int max_regno = max_reg_num ();
  int i, regno, num, slot_num;
  ira_allocno_t allocno, a;
  ira_allocno_iterator ai;
  ira_allocno_t *spilled_coalesced_allocnos;

  /* Set up allocnos can be coalesced.  */
  coloring_allocno_bitmap = ira_allocate_bitmap ();
  for (i = 0; i < n; i++)
    {
      regno = pseudo_regnos[i];
      allocno = ira_regno_allocno_map[regno];
      if (allocno != NULL)
	bitmap_set_bit (coloring_allocno_bitmap, ALLOCNO_NUM (allocno));
    }
  allocno_coalesced_p = false;
  processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
  allocno_coalesce_data
    = (coalesce_data_t) ira_allocate (sizeof (struct coalesce_data)
				      * ira_allocnos_num);
  /* Initialize coalesce data for allocnos.  Each allocno starts as a
     singleton set: it is its own representative (first) and forms a
     one-element circular list (next).  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      ALLOCNO_ADD_DATA (a) = allocno_coalesce_data + ALLOCNO_NUM (a);
      ALLOCNO_COALESCE_DATA (a)->first = a;
      ALLOCNO_COALESCE_DATA (a)->next = a;
    }
  coalesce_allocnos ();
  ira_free_bitmap (coloring_allocno_bitmap);
  regno_coalesced_allocno_cost
    = (int *) ira_allocate (max_regno * sizeof (int));
  regno_coalesced_allocno_num
    = (int *) ira_allocate (max_regno * sizeof (int));
  memset (regno_coalesced_allocno_num, 0, max_regno * sizeof (int));
  setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
  /* Sort regnos according frequencies of the corresponding coalesced
     allocno sets.  */
  qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare);
  spilled_coalesced_allocnos
    = (ira_allocno_t *) ira_allocate (ira_allocnos_num
				      * sizeof (ira_allocno_t));
  /* Collect allocnos representing the spilled coalesced allocno
     sets.  */
  num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
					    spilled_coalesced_allocnos);
  if (flag_ira_share_spill_slots
      && coalesce_spill_slots (spilled_coalesced_allocnos, num))
    {
      /* Slot coalescing merged some sets, so the costs/nums and the
	 frequency order computed above are stale — redo them.  */
      setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
      qsort (pseudo_regnos, n, sizeof (int),
	     coalesced_pseudo_reg_freq_compare);
      num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
						spilled_coalesced_allocnos);
    }
  ira_free_bitmap (processed_coalesced_allocno_bitmap);
  allocno_coalesced_p = false;
  /* Assign stack slot numbers to spilled allocno sets, use smaller
     numbers for most frequently used coalesced allocnos.  -1 is
     reserved for dynamic search of stack slots for pseudos spilled by
     the reload.  */
  slot_num = 1;
  for (i = 0; i < num; i++)
    {
      allocno = spilled_coalesced_allocnos[i];
      /* Skip non-representatives, allocnos that got a hard register,
	 and allocnos equivalent to a constant/invariant (those need no
	 stack slot).  */
      if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno
	  || ALLOCNO_HARD_REGNO (allocno) >= 0
	  || (ALLOCNO_REGNO (allocno) < ira_reg_equiv_len
	      && (ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX
		  || ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)])))
	continue;
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	fprintf (ira_dump_file, " Slot %d (freq,size):", slot_num);
      slot_num++;
      /* Walk the circular coalesce list and stamp every member of the
	 set with the (negated) slot number.  */
      for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
	   a = ALLOCNO_COALESCE_DATA (a)->next)
	{
	  ira_assert (ALLOCNO_HARD_REGNO (a) < 0);
	  ALLOCNO_HARD_REGNO (a) = -slot_num;
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, " a%dr%d(%d,%d)",
		     ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a),
		     MAX (PSEUDO_REGNO_BYTES (ALLOCNO_REGNO (a)),
			  reg_max_ref_width[ALLOCNO_REGNO (a)]));

	  if (a == allocno)
	    break;
	}
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	fprintf (ira_dump_file, "\n");
    }
  ira_spilled_reg_stack_slots_num = slot_num - 1;
  ira_free (spilled_coalesced_allocnos);
  /* Sort regnos according the slot numbers.  */
  regno_max_ref_width = reg_max_ref_width;
  qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_slot_compare);
  FOR_EACH_ALLOCNO (a, ai)
    ALLOCNO_ADD_DATA (a) = NULL;
  ira_free (allocno_coalesce_data);
  ira_free (regno_coalesced_allocno_num);
  ira_free (regno_coalesced_allocno_cost);
}
3810
3811 \f
3812
3813 /* This page contains code used by the reload pass to improve the
3814 final code. */
3815
/* The function is called from reload to mark changes in the
   allocation of REGNO made by the reload.  Remember that reg_renumber
   reflects the change result.

   The function keeps ALLOCNO_HARD_REGNO in sync with reg_renumber and
   adjusts IRA_OVERALL_COST by the cost difference between the old and
   the new allocation, propagating the change to copies via
   update_copy_costs.  */
void
ira_mark_allocation_change (int regno)
{
  ira_allocno_t a = ira_regno_allocno_map[regno];
  int old_hard_regno, hard_regno, cost;
  enum reg_class aclass = ALLOCNO_CLASS (a);

  ira_assert (a != NULL);
  hard_regno = reg_renumber[regno];
  /* Nothing to do when reload kept the previous allocation.  */
  if ((old_hard_regno = ALLOCNO_HARD_REGNO (a)) == hard_regno)
    return;
  /* COST starts as the negated cost of the old allocation (memory
     cost if it was spilled, hard-register cost otherwise).  */
  if (old_hard_regno < 0)
    cost = -ALLOCNO_MEMORY_COST (a);
  else
    {
      ira_assert (ira_class_hard_reg_index[aclass][old_hard_regno] >= 0);
      cost = -(ALLOCNO_HARD_REG_COSTS (a) == NULL
	       ? ALLOCNO_CLASS_COST (a)
	       : ALLOCNO_HARD_REG_COSTS (a)
	         [ira_class_hard_reg_index[aclass][old_hard_regno]]);
      update_copy_costs (a, false);
    }
  /* NOTE(review): together with the final "ira_overall_cost += cost"
     below, this looks like it may apply the old-allocation term twice —
     confirm against upstream history before relying on the exact
     overall-cost statistic.  */
  ira_overall_cost -= cost;
  ALLOCNO_HARD_REGNO (a) = hard_regno;
  if (hard_regno < 0)
    {
      ALLOCNO_HARD_REGNO (a) = -1;
      cost += ALLOCNO_MEMORY_COST (a);
    }
  else if (ira_class_hard_reg_index[aclass][hard_regno] >= 0)
    {
      cost += (ALLOCNO_HARD_REG_COSTS (a) == NULL
	       ? ALLOCNO_CLASS_COST (a)
	       : ALLOCNO_HARD_REG_COSTS (a)
	         [ira_class_hard_reg_index[aclass][hard_regno]]);
      update_copy_costs (a, true);
    }
  else
    /* Reload changed class of the allocno.  */
    cost = 0;
  ira_overall_cost += cost;
}
3861
3862 /* This function is called when reload deletes memory-memory move. In
3863 this case we marks that the allocation of the corresponding
3864 allocnos should be not changed in future. Otherwise we risk to get
3865 a wrong code. */
3866 void
3867 ira_mark_memory_move_deletion (int dst_regno, int src_regno)
3868 {
3869 ira_allocno_t dst = ira_regno_allocno_map[dst_regno];
3870 ira_allocno_t src = ira_regno_allocno_map[src_regno];
3871
3872 ira_assert (dst != NULL && src != NULL
3873 && ALLOCNO_HARD_REGNO (dst) < 0
3874 && ALLOCNO_HARD_REGNO (src) < 0);
3875 ALLOCNO_DONT_REASSIGN_P (dst) = true;
3876 ALLOCNO_DONT_REASSIGN_P (src) = true;
3877 }
3878
/* Try to assign a hard register (except for FORBIDDEN_REGS) to
   allocno A and return TRUE in the case of success.

   The allocno's total conflict hard-register sets are temporarily
   widened with FORBIDDEN_REGS (and with the call-used registers when
   caller saves are disabled and A crosses calls), assign_hard_reg is
   rerun, and the original conflict sets are restored before
   returning.  On success reg_renumber and the pseudo's RTL are
   updated and IRA_OVERALL_COST is decreased by the savings over
   keeping A in memory.  */
static bool
allocno_reload_assign (ira_allocno_t a, HARD_REG_SET forbidden_regs)
{
  int hard_regno;
  enum reg_class aclass;
  int regno = ALLOCNO_REGNO (a);
  /* Backup of each object's total conflict set so the widening below
     can be undone.  NOTE(review): SAVED has room for two objects —
     assumes ALLOCNO_NUM_OBJECTS (a) <= 2; confirm against the allocno
     build code.  */
  HARD_REG_SET saved[2];
  int i, n;

  n = ALLOCNO_NUM_OBJECTS (a);
  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);
      COPY_HARD_REG_SET (saved[i], OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), forbidden_regs);
      /* Without caller saves, an allocno living across calls may not
	 use call-clobbered registers either.  */
      if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			  call_used_reg_set);
    }
  ALLOCNO_ASSIGNED_P (a) = false;
  aclass = ALLOCNO_CLASS (a);
  update_curr_costs (a);
  assign_hard_reg (a, true);
  hard_regno = ALLOCNO_HARD_REGNO (a);
  reg_renumber[regno] = hard_regno;
  if (hard_regno < 0)
    ALLOCNO_HARD_REGNO (a) = -1;
  else
    {
      ira_assert (ira_class_hard_reg_index[aclass][hard_regno] >= 0);
      /* Moving A from memory to a hard register saves the difference
	 between its memory cost and its cost in HARD_REGNO.  */
      ira_overall_cost
	-= (ALLOCNO_MEMORY_COST (a)
	    - (ALLOCNO_HARD_REG_COSTS (a) == NULL
	       ? ALLOCNO_CLASS_COST (a)
	       : ALLOCNO_HARD_REG_COSTS (a)[ira_class_hard_reg_index
					    [aclass][hard_regno]]));
      if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
	  && ira_hard_reg_set_intersection_p (hard_regno, ALLOCNO_MODE (a),
					      call_used_reg_set))
	{
	  ira_assert (flag_caller_saves);
	  caller_save_needed = 1;
	}
    }

  /* If we found a hard register, modify the RTL for the pseudo
     register to show the hard register, and mark the pseudo register
     live.  */
  if (reg_renumber[regno] >= 0)
    {
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	fprintf (ira_dump_file, ": reassign to %d\n", reg_renumber[regno]);
      SET_REGNO (regno_reg_rtx[regno], reg_renumber[regno]);
      mark_home_live (regno);
    }
  else if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
    fprintf (ira_dump_file, "\n");
  /* Undo the temporary widening of the conflict sets.  */
  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);
      COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), saved[i]);
    }
  return reg_renumber[regno] >= 0;
}
3945
/* qsort comparison function ordering pseudo-register numbers by
   decreasing usage frequency.  Equal frequencies are ordered by
   ascending register number so the result does not depend on the
   qsort implementation.  */
static int
pseudo_reg_compare (const void *v1p, const void *v2p)
{
  int first = *(const int *) v1p;
  int second = *(const int *) v2p;
  int freq_diff = REG_FREQ (second) - REG_FREQ (first);

  return freq_diff != 0 ? freq_diff : first - second;
}
3959
/* Try to allocate hard registers to SPILLED_PSEUDO_REGS (there are
   NUM of them) or spilled pseudos conflicting with pseudos in
   SPILLED_PSEUDO_REGS.  Return TRUE and update SPILLED, if the
   allocation has been changed.  The function doesn't use
   BAD_SPILL_REGS and hard registers in PSEUDO_FORBIDDEN_REGS and
   PSEUDO_PREVIOUS_REGS for the corresponding pseudos.  The function
   is called by the reload pass at the end of each reload
   iteration.

   NOTE(review): the caller must size SPILLED_PSEUDO_REGS large enough
   to take the conflicting pseudos appended below — verify at the call
   site in reload.  */
bool
ira_reassign_pseudos (int *spilled_pseudo_regs, int num,
		      HARD_REG_SET bad_spill_regs,
		      HARD_REG_SET *pseudo_forbidden_regs,
		      HARD_REG_SET *pseudo_previous_regs,
		      bitmap spilled)
{
  int i, n, regno;
  bool changed_p;
  ira_allocno_t a;
  HARD_REG_SET forbidden_regs;
  /* TEMP holds the regnos already in the work list so each conflict
     is appended at most once (bitmap_set_bit returns false for bits
     already set).  */
  bitmap temp = BITMAP_ALLOC (NULL);

  /* Add pseudos which conflict with pseudos already in
     SPILLED_PSEUDO_REGS to SPILLED_PSEUDO_REGS.  This is preferable
     to allocating in two steps as some of the conflicts might have
     a higher priority than the pseudos passed in SPILLED_PSEUDO_REGS.  */
  for (i = 0; i < num; i++)
    bitmap_set_bit (temp, spilled_pseudo_regs[i]);

  /* N snapshots the original NUM: only the original pseudos are
     scanned for conflicts; entries appended during the loop are not
     themselves expanded.  */
  for (i = 0, n = num; i < n; i++)
    {
      int nr, j;
      /* Shadows the outer REGNO declared above — harmless here, but
	 keep in mind when editing.  */
      int regno = spilled_pseudo_regs[i];
      bitmap_set_bit (temp, regno);

      a = ira_regno_allocno_map[regno];
      nr = ALLOCNO_NUM_OBJECTS (a);
      for (j = 0; j < nr; j++)
	{
	  ira_object_t conflict_obj;
	  ira_object_t obj = ALLOCNO_OBJECT (a, j);
	  ira_object_conflict_iterator oci;

	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	    {
	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
	      /* Only spilled, reassignable, not-yet-queued conflicts
		 are appended.  */
	      if (ALLOCNO_HARD_REGNO (conflict_a) < 0
		  && ! ALLOCNO_DONT_REASSIGN_P (conflict_a)
		  && bitmap_set_bit (temp, ALLOCNO_REGNO (conflict_a)))
		{
		  spilled_pseudo_regs[num++] = ALLOCNO_REGNO (conflict_a);
		  /* ?!? This seems wrong.  */
		  bitmap_set_bit (consideration_allocno_bitmap,
				  ALLOCNO_NUM (conflict_a));
		}
	    }
	}
    }

  if (num > 1)
    qsort (spilled_pseudo_regs, num, sizeof (int), pseudo_reg_compare);
  changed_p = false;
  /* Try to assign hard registers to pseudos from
     SPILLED_PSEUDO_REGS.  */
  for (i = 0; i < num; i++)
    {
      regno = spilled_pseudo_regs[i];
      COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
      IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
      IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
      gcc_assert (reg_renumber[regno] < 0);
      a = ira_regno_allocno_map[regno];
      ira_mark_allocation_change (regno);
      ira_assert (reg_renumber[regno] < 0);
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	fprintf (ira_dump_file,
		 " Try Assign %d(a%d), cost=%d", regno, ALLOCNO_NUM (a),
		 ALLOCNO_MEMORY_COST (a)
		 - ALLOCNO_CLASS_COST (a));
      allocno_reload_assign (a, forbidden_regs);
      if (reg_renumber[regno] >= 0)
	{
	  CLEAR_REGNO_REG_SET (spilled, regno);
	  changed_p = true;
	}
    }
  BITMAP_FREE (temp);
  return changed_p;
}
4048
/* The function is called by reload and returns already allocated
   stack slot (if any) for REGNO with given INHERENT_SIZE and
   TOTAL_SIZE.  In the case of failure to find a slot which can be
   used for REGNO, the function returns NULL.

   A spilled allocno's hard regno encodes its slot: values <= -2 mean
   slot (-hard_regno - 2) of IRA_SPILLED_REG_STACK_SLOTS, while -1
   means no slot has been chosen yet; in the latter case the existing
   slots are searched for a compatible one, preferring the slot that
   maximizes the summed frequency of copies connecting REGNO with
   pseudos already in that slot.  */
rtx
ira_reuse_stack_slot (int regno, unsigned int inherent_size,
		      unsigned int total_size)
{
  unsigned int i;
  int slot_num, best_slot_num;
  int cost, best_cost;
  ira_copy_t cp, next_cp;
  ira_allocno_t another_allocno, allocno = ira_regno_allocno_map[regno];
  rtx x;
  bitmap_iterator bi;
  struct ira_spilled_reg_stack_slot *slot = NULL;

  ira_assert (inherent_size == PSEUDO_REGNO_BYTES (regno)
	      && inherent_size <= total_size
	      && ALLOCNO_HARD_REGNO (allocno) < 0);
  if (! flag_ira_share_spill_slots)
    return NULL_RTX;
  /* Decode the slot number; -1 means IRA assigned no slot and the
     pseudo was spilled by reload itself.  */
  slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
  if (slot_num != -1)
    {
      slot = &ira_spilled_reg_stack_slots[slot_num];
      x = slot->mem;
    }
  else
    {
      best_cost = best_slot_num = -1;
      x = NULL_RTX;
      /* It means that the pseudo was spilled in the reload pass, try
	 to reuse a slot.  */
      for (slot_num = 0;
	   slot_num < ira_spilled_reg_stack_slots_num;
	   slot_num++)
	{
	  slot = &ira_spilled_reg_stack_slots[slot_num];
	  if (slot->mem == NULL_RTX)
	    continue;
	  /* The slot must be big enough for both the total size and
	     the natural mode size of the pseudo.  */
	  if (slot->width < total_size
	      || GET_MODE_SIZE (GET_MODE (slot->mem)) < inherent_size)
	    continue;

	  /* Reject the slot if REGNO's live ranges intersect any
	     pseudo already placed there.  */
	  EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
				    FIRST_PSEUDO_REGISTER, i, bi)
	    {
	      another_allocno = ira_regno_allocno_map[i];
	      if (allocnos_conflict_by_live_ranges_p (allocno,
						      another_allocno))
		goto cont;
	    }
	  /* Score the slot by the total frequency of copies between
	     REGNO and pseudos already sharing the slot — sharing with
	     copy partners removes memory-memory moves.  */
	  for (cost = 0, cp = ALLOCNO_COPIES (allocno);
	       cp != NULL;
	       cp = next_cp)
	    {
	      if (cp->first == allocno)
		{
		  next_cp = cp->next_first_allocno_copy;
		  another_allocno = cp->second;
		}
	      else if (cp->second == allocno)
		{
		  next_cp = cp->next_second_allocno_copy;
		  another_allocno = cp->first;
		}
	      else
		gcc_unreachable ();
	      if (cp->insn == NULL_RTX)
		continue;
	      if (bitmap_bit_p (&slot->spilled_regs,
				ALLOCNO_REGNO (another_allocno)))
		cost += cp->freq;
	    }
	  if (cost > best_cost)
	    {
	      best_cost = cost;
	      best_slot_num = slot_num;
	    }
	cont:
	  ;
	}
      if (best_cost >= 0)
	{
	  slot_num = best_slot_num;
	  slot = &ira_spilled_reg_stack_slots[slot_num];
	  SET_REGNO_REG_SET (&slot->spilled_regs, regno);
	  x = slot->mem;
	  /* Record the chosen slot back into the allocno using the
	     same <= -2 encoding.  */
	  ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
	}
    }
  if (x != NULL_RTX)
    {
      ira_assert (slot->width >= total_size);
#ifdef ENABLE_IRA_CHECKING
      EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
				FIRST_PSEUDO_REGISTER, i, bi)
	{
	  ira_assert (! conflict_by_live_ranges_p (regno, i));
	}
#endif
      SET_REGNO_REG_SET (&slot->spilled_regs, regno);
      if (internal_flag_ira_verbose > 3 && ira_dump_file)
	{
	  fprintf (ira_dump_file, " Assigning %d(freq=%d) slot %d of",
		   regno, REG_FREQ (regno), slot_num);
	  EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
				    FIRST_PSEUDO_REGISTER, i, bi)
	    {
	      if ((unsigned) regno != i)
		fprintf (ira_dump_file, " %d", i);
	    }
	  fprintf (ira_dump_file, "\n");
	}
    }
  return x;
}
4167
/* This is called by reload every time a new stack slot X with
   TOTAL_SIZE was allocated for REGNO.  We store this info for
   subsequent ira_reuse_stack_slot calls.

   If the allocno has no slot yet (encoded hard regno of -1), a fresh
   entry of IRA_SPILLED_REG_STACK_SLOTS is claimed; otherwise the
   existing entry is reinitialized to contain only REGNO.  */
void
ira_mark_new_stack_slot (rtx x, int regno, unsigned int total_size)
{
  struct ira_spilled_reg_stack_slot *slot;
  int slot_num;
  ira_allocno_t allocno;

  ira_assert (PSEUDO_REGNO_BYTES (regno) <= total_size);
  allocno = ira_regno_allocno_map[regno];
  /* Same slot encoding as in ira_reuse_stack_slot: <= -2 is a slot,
     -1 means none assigned yet.  */
  slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
  if (slot_num == -1)
    {
      slot_num = ira_spilled_reg_stack_slots_num++;
      ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
    }
  slot = &ira_spilled_reg_stack_slots[slot_num];
  INIT_REG_SET (&slot->spilled_regs);
  SET_REGNO_REG_SET (&slot->spilled_regs, regno);
  slot->mem = x;
  slot->width = total_size;
  if (internal_flag_ira_verbose > 3 && ira_dump_file)
    fprintf (ira_dump_file, " Assigning %d(freq=%d) a new slot %d\n",
	     regno, REG_FREQ (regno), slot_num);
}
4195
4196
/* Return spill cost for pseudo-registers whose numbers are in array
   REGNOS (with a negative number as an end marker) for reload with
   given IN and OUT for INSN.  Return also number points (through
   EXCESS_PRESSURE_LIVE_LENGTH) where the pseudo-register lives and
   the register pressure is high, number of references of the
   pseudo-registers (through NREFS), number of callee-clobbered
   hard-registers occupied by the pseudo-registers (through
   CALL_USED_COUNT), and the first hard regno occupied by the
   pseudo-registers (through FIRST_HARD_REGNO).  */
static int
calculate_spill_cost (int *regnos, rtx in, rtx out, rtx insn,
		      int *excess_pressure_live_length,
		      int *nrefs, int *call_used_count, int *first_hard_regno)
{
  int i, cost, regno, hard_regno, j, count, saved_cost, nregs;
  bool in_p, out_p;
  int length;
  ira_allocno_t a;

  *nrefs = 0;
  /* Accumulate over the sentinel-terminated regno list; every pseudo
     here must currently hold a hard register.  */
  for (length = count = cost = i = 0;; i++)
    {
      regno = regnos[i];
      if (regno < 0)
	break;
      *nrefs += REG_N_REFS (regno);
      hard_regno = reg_renumber[regno];
      ira_assert (hard_regno >= 0);
      a = ira_regno_allocno_map[regno];
      /* Average the high-pressure point count over the allocno's
	 objects.  */
      length += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) / ALLOCNO_NUM_OBJECTS (a);
      cost += ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a);
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
      /* Count the pseudo only if every hard register it occupies is
	 call-clobbered.  */
      for (j = 0; j < nregs; j++)
	if (! TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + j))
	  break;
      if (j == nregs)
	count++;
      in_p = in && REG_P (in) && (int) REGNO (in) == hard_regno;
      out_p = out && REG_P (out) && (int) REGNO (out) == hard_regno;
      /* If the pseudo occupies INSN's reload register and dies in
	 INSN, spilling it saves the corresponding memory moves;
	 discount the cost by their frequency-weighted price.  */
      if ((in_p || out_p)
	  && find_regno_note (insn, REG_DEAD, hard_regno) != NULL_RTX)
	{
	  saved_cost = 0;
	  if (in_p)
	    saved_cost += ira_memory_move_cost
	                  [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][1];
	  if (out_p)
	    saved_cost
	      += ira_memory_move_cost
		 [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][0];
	  cost -= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn)) * saved_cost;
	}
    }
  *excess_pressure_live_length = length;
  *call_used_count = count;
  hard_regno = -1;
  if (regnos[0] >= 0)
    {
      hard_regno = reg_renumber[regnos[0]];
    }
  *first_hard_regno = hard_regno;
  return cost;
}
4260
4261 /* Return TRUE if spilling pseudo-registers whose numbers are in array
4262 REGNOS is better than spilling pseudo-registers with numbers in
4263 OTHER_REGNOS for reload with given IN and OUT for INSN. The
4264 function used by the reload pass to make better register spilling
4265 decisions. */
4266 bool
4267 ira_better_spill_reload_regno_p (int *regnos, int *other_regnos,
4268 rtx in, rtx out, rtx insn)
4269 {
4270 int cost, other_cost;
4271 int length, other_length;
4272 int nrefs, other_nrefs;
4273 int call_used_count, other_call_used_count;
4274 int hard_regno, other_hard_regno;
4275
4276 cost = calculate_spill_cost (regnos, in, out, insn,
4277 &length, &nrefs, &call_used_count, &hard_regno);
4278 other_cost = calculate_spill_cost (other_regnos, in, out, insn,
4279 &other_length, &other_nrefs,
4280 &other_call_used_count,
4281 &other_hard_regno);
4282 if (nrefs == 0 && other_nrefs != 0)
4283 return true;
4284 if (nrefs != 0 && other_nrefs == 0)
4285 return false;
4286 if (cost != other_cost)
4287 return cost < other_cost;
4288 if (length != other_length)
4289 return length > other_length;
4290 #ifdef REG_ALLOC_ORDER
4291 if (hard_regno >= 0 && other_hard_regno >= 0)
4292 return (inv_reg_alloc_order[hard_regno]
4293 < inv_reg_alloc_order[other_hard_regno]);
4294 #else
4295 if (call_used_count != other_call_used_count)
4296 return call_used_count > other_call_used_count;
4297 #endif
4298 return false;
4299 }
4300
4301 \f
4302
4303 /* Allocate and initialize data necessary for assign_hard_reg. */
4304 void
4305 ira_initiate_assign (void)
4306 {
4307 sorted_allocnos
4308 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
4309 * ira_allocnos_num);
4310 consideration_allocno_bitmap = ira_allocate_bitmap ();
4311 initiate_cost_update ();
4312 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
4313 }
4314
4315 /* Deallocate data used by assign_hard_reg. */
4316 void
4317 ira_finish_assign (void)
4318 {
4319 ira_free (sorted_allocnos);
4320 ira_free_bitmap (consideration_allocno_bitmap);
4321 finish_cost_update ();
4322 ira_free (allocno_priorities);
4323 }
4324
4325 \f
4326
/* Entry function doing color-based register allocation: set up the
   coloring stack and assignment data, run the coloring itself, then
   try to move spill/restore code to cheaper places.  */
static void
color (void)
{
  allocno_stack_vec = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
  /* Clear the per-hard-register allocation flags before coloring
     (presumably tracks which hard regs got used — confirm at the
     declaration of allocated_hardreg_p).  */
  memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
  ira_initiate_assign ();
  do_coloring ();
  ira_finish_assign ();
  VEC_free (ira_allocno_t, heap, allocno_stack_vec);
  move_spill_restore ();
}
4339
4340 \f
4341
/* This page contains a simple register allocator without usage of
   allocno conflicts.  This is used for fast allocation for -O0.  */

/* Do register allocation by not using allocno conflicts.  It uses
   only allocno live ranges.  The algorithm is close to Chow's
   priority coloring.

   Allocnos are processed in priority order; for each one the hard
   registers busy at any program point of its live ranges are
   collected, and the first acceptable register of its class (if any)
   is taken and marked busy over those points.  Allocnos that find no
   register keep hard regno -1, i.e. stay in memory.  */
static void
fast_allocation (void)
{
  int i, j, k, num, class_size, hard_regno;
#ifdef STACK_REGS
  bool no_stack_reg_p;
#endif
  enum reg_class aclass;
  enum machine_mode mode;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  live_range_t r;
  /* USED_HARD_REGS[P] = hard registers occupied at program point P by
     already-allocated allocnos.  */
  HARD_REG_SET conflict_hard_regs, *used_hard_regs;

  sorted_allocnos = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
						    * ira_allocnos_num);
  num = 0;
  FOR_EACH_ALLOCNO (a, ai)
    sorted_allocnos[num++] = a;
  allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
  setup_allocno_priorities (sorted_allocnos, num);
  used_hard_regs = (HARD_REG_SET *) ira_allocate (sizeof (HARD_REG_SET)
						  * ira_max_point);
  for (i = 0; i < ira_max_point; i++)
    CLEAR_HARD_REG_SET (used_hard_regs[i]);
  /* Highest-priority allocnos get first pick of the registers.  */
  qsort (sorted_allocnos, num, sizeof (ira_allocno_t),
	 allocno_priority_compare_func);
  for (i = 0; i < num; i++)
    {
      int nr, l;

      a = sorted_allocnos[i];
      nr = ALLOCNO_NUM_OBJECTS (a);
      /* Gather every hard register busy somewhere in A's live
	 ranges, plus A's own conflict registers.  */
      CLEAR_HARD_REG_SET (conflict_hard_regs);
      for (l = 0; l < nr; l++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, l);
	  IOR_HARD_REG_SET (conflict_hard_regs,
			    OBJECT_CONFLICT_HARD_REGS (obj));
	  for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	    for (j = r->start; j <= r->finish; j++)
	      IOR_HARD_REG_SET (conflict_hard_regs, used_hard_regs[j]);
	}
      aclass = ALLOCNO_CLASS (a);
      ALLOCNO_ASSIGNED_P (a) = true;
      ALLOCNO_HARD_REGNO (a) = -1;
      /* If every register of the class conflicts, A stays in memory.  */
      if (hard_reg_set_subset_p (reg_class_contents[aclass],
				 conflict_hard_regs))
	continue;
      mode = ALLOCNO_MODE (a);
#ifdef STACK_REGS
      no_stack_reg_p = ALLOCNO_NO_STACK_REG_P (a);
#endif
      class_size = ira_class_hard_regs_num[aclass];
      /* Take the first register of the class that is neither busy nor
	 prohibited for A's mode.  */
      for (j = 0; j < class_size; j++)
	{
	  hard_regno = ira_class_hard_regs[aclass][j];
#ifdef STACK_REGS
	  if (no_stack_reg_p && FIRST_STACK_REG <= hard_regno
	      && hard_regno <= LAST_STACK_REG)
	    continue;
#endif
	  if (ira_hard_reg_set_intersection_p (hard_regno, mode, conflict_hard_regs)
	      || (TEST_HARD_REG_BIT
		  (ira_prohibited_class_mode_regs[aclass][mode], hard_regno)))
	    continue;
	  ALLOCNO_HARD_REGNO (a) = hard_regno;
	  /* Mark all registers of HARD_REGNO in MODE busy across A's
	     live ranges.  */
	  for (l = 0; l < nr; l++)
	    {
	      ira_object_t obj = ALLOCNO_OBJECT (a, l);
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		for (k = r->start; k <= r->finish; k++)
		  IOR_HARD_REG_SET (used_hard_regs[k],
				    ira_reg_mode_hard_regset[hard_regno][mode]);
	    }
	  break;
	}
    }
  ira_free (sorted_allocnos);
  ira_free (used_hard_regs);
  ira_free (allocno_priorities);
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    ira_print_disposition (ira_dump_file);
}
4432
4433 \f
4434
/* Entry function doing coloring.  Seeds the updated (working) cost
   fields from the base costs, then dispatches to the conflict-based
   graph-coloring allocator, or to the fast live-range-only allocator
   when conflicts were not built (ira_conflicts_p false, e.g. -O0).  */
void
ira_color (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  /* Setup updated costs.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
      ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
    }
  if (ira_conflicts_p)
    color ();
  else
    fast_allocation ();
}