/* IRA conflict builder.
   Copyright (C) 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "regs.h"
#include "rtl.h"
#include "tm_p.h"
#include "target.h"
#include "flags.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "recog.h"
#include "toplev.h"
#include "params.h"
#include "df.h"
#include "sparseset.h"
#include "ira-int.h"
#include "addresses.h"

/* This file contains code responsible for allocno conflict creation,
   allocno copy creation and allocno info accumulation on upper level
   regions.  */

/* An array (of size ira_allocnos_num) of arrays of bits, recording
   whether two allocnos conflict (i.e. cannot go in the same hardware
   register).

   Some of the arrays will be reused as the conflict bit vectors of
   the corresponding allocnos; see function build_allocno_conflicts.  */
static IRA_INT_TYPE **conflicts;

/* Macro to test a conflict of A1 and A2 in `conflicts'.  */
#define CONFLICT_ALLOCNO_P(A1, A2)				\
  (ALLOCNO_MIN (A1) <= ALLOCNO_CONFLICT_ID (A2)			\
   && ALLOCNO_CONFLICT_ID (A2) <= ALLOCNO_MAX (A1)		\
   && TEST_ALLOCNO_SET_BIT (conflicts[ALLOCNO_NUM (A1)],	\
			    ALLOCNO_CONFLICT_ID (A2),		\
			    ALLOCNO_MIN (A1),			\
			    ALLOCNO_MAX (A1)))

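/* Illustrative note (not part of the original sources): each allocno
   A only stores conflict bits for conflict ids inside the window
   [ALLOCNO_MIN (A), ALLOCNO_MAX (A)].  For example, if ALLOCNO_MIN
   (A1) == 5 and ALLOCNO_MAX (A1) == 12, conflicts[ALLOCNO_NUM (A1)]
   holds only 8 usable bits and CONFLICT_ALLOCNO_P (A1, A2) can be
   true only when ALLOCNO_CONFLICT_ID (A2) is in 5..12; ids outside
   the window are rejected by the range checks before the bit vector
   is read, which keeps the table compact.  */
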
\f

/* Build allocno conflict table by processing allocno live ranges.
   Return true if the table was built.  The table is not built if it
   is too big.  */
static bool
build_conflict_bit_table (void)
{
  int i, num, id, allocated_words_num, conflict_bit_vec_words_num;
  unsigned int j;
  enum reg_class cover_class;
  ira_allocno_t allocno, live_a;
  allocno_live_range_t r;
  ira_allocno_iterator ai;
  sparseset allocnos_live;
  int allocno_set_words;

  allocno_set_words = (ira_allocnos_num + IRA_INT_BITS - 1) / IRA_INT_BITS;
  allocated_words_num = 0;
  FOR_EACH_ALLOCNO (allocno, ai)
    {
      if (ALLOCNO_MAX (allocno) < ALLOCNO_MIN (allocno))
	continue;
      conflict_bit_vec_words_num
	= ((ALLOCNO_MAX (allocno) - ALLOCNO_MIN (allocno) + IRA_INT_BITS)
	   / IRA_INT_BITS);
      allocated_words_num += conflict_bit_vec_words_num;
      if ((unsigned long long) allocated_words_num * sizeof (IRA_INT_TYPE)
	  > (unsigned long long) IRA_MAX_CONFLICT_TABLE_SIZE * 1024 * 1024)
	{
	  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
	    fprintf
	      (ira_dump_file,
	       "+++Conflict table will be too big(>%dMB) -- don't use it\n",
	       IRA_MAX_CONFLICT_TABLE_SIZE);
	  return false;
	}
    }
  allocnos_live = sparseset_alloc (ira_allocnos_num);
  conflicts = (IRA_INT_TYPE **) ira_allocate (sizeof (IRA_INT_TYPE *)
					      * ira_allocnos_num);
  allocated_words_num = 0;
  FOR_EACH_ALLOCNO (allocno, ai)
    {
      num = ALLOCNO_NUM (allocno);
      if (ALLOCNO_MAX (allocno) < ALLOCNO_MIN (allocno))
	{
	  conflicts[num] = NULL;
	  continue;
	}
      conflict_bit_vec_words_num
	= ((ALLOCNO_MAX (allocno) - ALLOCNO_MIN (allocno) + IRA_INT_BITS)
	   / IRA_INT_BITS);
      allocated_words_num += conflict_bit_vec_words_num;
      conflicts[num]
	= (IRA_INT_TYPE *) ira_allocate (sizeof (IRA_INT_TYPE)
					 * conflict_bit_vec_words_num);
      memset (conflicts[num], 0,
	      sizeof (IRA_INT_TYPE) * conflict_bit_vec_words_num);
    }
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    fprintf
      (ira_dump_file,
       "+++Allocating %ld bytes for conflict table (uncompressed size %ld)\n",
       (long) allocated_words_num * sizeof (IRA_INT_TYPE),
       (long) allocno_set_words * ira_allocnos_num * sizeof (IRA_INT_TYPE));
  for (i = 0; i < ira_max_point; i++)
    {
      for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
	{
	  allocno = r->allocno;
	  num = ALLOCNO_NUM (allocno);
	  id = ALLOCNO_CONFLICT_ID (allocno);
	  cover_class = ALLOCNO_COVER_CLASS (allocno);
	  sparseset_set_bit (allocnos_live, num);
	  EXECUTE_IF_SET_IN_SPARSESET (allocnos_live, j)
	    {
	      live_a = ira_allocnos[j];
	      if (ira_reg_classes_intersect_p
		  [cover_class][ALLOCNO_COVER_CLASS (live_a)]
		  /* Don't set up conflict for the allocno with itself.  */
		  && num != (int) j)
		{
		  SET_ALLOCNO_SET_BIT (conflicts[num],
				       ALLOCNO_CONFLICT_ID (live_a),
				       ALLOCNO_MIN (allocno),
				       ALLOCNO_MAX (allocno));
		  SET_ALLOCNO_SET_BIT (conflicts[j], id,
				       ALLOCNO_MIN (live_a),
				       ALLOCNO_MAX (live_a));
		}
	    }
	}

      for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
	sparseset_clear_bit (allocnos_live, ALLOCNO_NUM (r->allocno));
    }
  sparseset_free (allocnos_live);
  return true;
}

\f

/* Return TRUE if the operand constraint STR is commutative.  */
static bool
commutative_constraint_p (const char *str)
{
  bool ignore_p;
  int c;

  for (ignore_p = false;;)
    {
      c = *str;
      if (c == '\0')
	break;
      str += CONSTRAINT_LEN (c, str);
      if (c == '#')
	ignore_p = true;
      else if (c == ',')
	ignore_p = false;
      else if (! ignore_p)
	{
	  /* Usually `%' is the first constraint character but the
	     documentation does not require this.  */
	  if (c == '%')
	    return true;
	}
    }
  return false;
}

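/* Example (illustrative only): for an operand whose constraint string
   is "%0,r" the '%' marker declares the operand commutative with the
   following operand, so commutative_constraint_p returns true; for a
   plain "r,m" constraint it returns false.  Alternatives hidden by
   '#' are skipped, matching how the other constraint scanners in this
   file treat disabled alternatives.  */
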
/* Return the number of the operand which should be the same in any
   case as the operand with number OP_NUM (or a negative value if
   there is no such operand).  If USE_COMMUT_OP_P is TRUE, the
   function temporarily exchanges the commutative operands before
   this.  The function takes only really possible alternatives into
   consideration.  */
static int
get_dup_num (int op_num, bool use_commut_op_p)
{
  int curr_alt, c, original, dup;
  bool ignore_p, commut_op_used_p;
  const char *str;
  rtx op;

  if (op_num < 0 || recog_data.n_alternatives == 0)
    return -1;
  op = recog_data.operand[op_num];
  commut_op_used_p = true;
  if (use_commut_op_p)
    {
      if (commutative_constraint_p (recog_data.constraints[op_num]))
	op_num++;
      else if (op_num > 0 && commutative_constraint_p (recog_data.constraints
						       [op_num - 1]))
	op_num--;
      else
	commut_op_used_p = false;
    }
  str = recog_data.constraints[op_num];
  for (ignore_p = false, original = -1, curr_alt = 0;;)
    {
      c = *str;
      if (c == '\0')
	break;
      if (c == '#')
	ignore_p = true;
      else if (c == ',')
	{
	  curr_alt++;
	  ignore_p = false;
	}
      else if (! ignore_p)
	switch (c)
	  {
	  case 'X':
	    return -1;

	  case 'm':
	  case 'o':
	    /* Accept a register which might be placed in memory.  */
	    return -1;
	    break;

	  case 'V':
	  case '<':
	  case '>':
	    break;

	  case 'p':
	    if (address_operand (op, VOIDmode))
	      return -1;
	    break;

	  case 'g':
	    return -1;

	  case 'r':
	  case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
	  case 'h': case 'j': case 'k': case 'l':
	  case 'q': case 't': case 'u':
	  case 'v': case 'w': case 'x': case 'y': case 'z':
	  case 'A': case 'B': case 'C': case 'D':
	  case 'Q': case 'R': case 'S': case 'T': case 'U':
	  case 'W': case 'Y': case 'Z':
	    {
	      enum reg_class cl;

	      cl = (c == 'r'
		    ? GENERAL_REGS : REG_CLASS_FROM_CONSTRAINT (c, str));
	      if (cl != NO_REGS)
		return -1;
#ifdef EXTRA_CONSTRAINT_STR
	      else if (EXTRA_CONSTRAINT_STR (op, c, str))
		return -1;
#endif
	      break;
	    }

	  case '0': case '1': case '2': case '3': case '4':
	  case '5': case '6': case '7': case '8': case '9':
	    if (original != -1 && original != c)
	      return -1;
	    original = c;
	    break;
	  }
      str += CONSTRAINT_LEN (c, str);
    }
  if (original == -1)
    return -1;
  dup = original - '0';
  if (use_commut_op_p)
    {
      if (commutative_constraint_p (recog_data.constraints[dup]))
	dup++;
      else if (dup > 0
	       && commutative_constraint_p (recog_data.constraints[dup - 1]))
	dup--;
      else if (! commut_op_used_p)
	return -1;
    }
  return dup;
}

/* Check that X is REG or SUBREG of REG.  */
#define REG_SUBREG_P(x) \
   (REG_P (x) || (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x))))

/* Return X if X is a REG; otherwise X should be a SUBREG of a REG and
   the function returns that reg.  *OFFSET will be set to 0 in the
   first case or to the regno offset in the second case.  */
static rtx
go_through_subreg (rtx x, int *offset)
{
  rtx reg;

  *offset = 0;
  if (REG_P (x))
    return x;
  ira_assert (GET_CODE (x) == SUBREG);
  reg = SUBREG_REG (x);
  ira_assert (REG_P (reg));
  if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
    *offset = subreg_regno_offset (REGNO (reg), GET_MODE (reg),
				   SUBREG_BYTE (x), GET_MODE (x));
  else
    *offset = (SUBREG_BYTE (x) / REGMODE_NATURAL_SIZE (GET_MODE (x)));
  return reg;
}

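/* Example (illustrative, assuming a target with 32-bit words): for
   (subreg:SI (reg:DI 100) 4) the function returns (reg:DI 100) and
   sets *OFFSET to 1, i.e. the subreg refers to the second word of the
   pseudo; for a plain (reg:SI 101) it returns the reg unchanged with
   *OFFSET == 0.  */
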
/* Process registers REG1 and REG2 in move INSN with execution
   frequency FREQ.  The function also processes the registers in a
   potential move insn (INSN == NULL in this case) with frequency
   FREQ.  The function can modify hard register costs of the
   corresponding allocnos or create a copy involving the corresponding
   allocnos.  The function does nothing if both registers are hard
   registers.  When nothing is changed, the function returns
   FALSE.  */
static bool
process_regs_for_copy (rtx reg1, rtx reg2, bool constraint_p,
		       rtx insn, int freq)
{
  int allocno_preferenced_hard_regno, cost, index, offset1, offset2;
  bool only_regs_p;
  ira_allocno_t a;
  enum reg_class rclass, cover_class;
  enum machine_mode mode;
  ira_copy_t cp;
  ira_loop_tree_node_t parent;

  gcc_assert (REG_SUBREG_P (reg1) && REG_SUBREG_P (reg2));
  only_regs_p = REG_P (reg1) && REG_P (reg2);
  reg1 = go_through_subreg (reg1, &offset1);
  reg2 = go_through_subreg (reg2, &offset2);
  /* Set up the hard regno preferred by the allocno.  If the allocno
     gets this hard regno, the copy (or potential move) insn will be
     removed.  */
  if (HARD_REGISTER_P (reg1))
    {
      if (HARD_REGISTER_P (reg2))
	return false;
      allocno_preferenced_hard_regno = REGNO (reg1) + offset1 - offset2;
      a = ira_curr_regno_allocno_map[REGNO (reg2)];
    }
  else if (HARD_REGISTER_P (reg2))
    {
      allocno_preferenced_hard_regno = REGNO (reg2) + offset2 - offset1;
      a = ira_curr_regno_allocno_map[REGNO (reg1)];
    }
  else if (!CONFLICT_ALLOCNO_P (ira_curr_regno_allocno_map[REGNO (reg1)],
				ira_curr_regno_allocno_map[REGNO (reg2)])
	   && offset1 == offset2)
    {
      cp = ira_add_allocno_copy (ira_curr_regno_allocno_map[REGNO (reg1)],
				 ira_curr_regno_allocno_map[REGNO (reg2)],
				 freq, constraint_p, insn,
				 ira_curr_loop_tree_node);
      bitmap_set_bit (ira_curr_loop_tree_node->local_copies, cp->num);
      return true;
    }
  else
    return false;
  if (! IN_RANGE (allocno_preferenced_hard_regno, 0, FIRST_PSEUDO_REGISTER - 1))
    /* Can not be tied.  */
    return false;
  rclass = REGNO_REG_CLASS (allocno_preferenced_hard_regno);
  mode = ALLOCNO_MODE (a);
  cover_class = ALLOCNO_COVER_CLASS (a);
  if (only_regs_p && insn != NULL_RTX
      && reg_class_size[rclass] <= (unsigned) CLASS_MAX_NREGS (rclass, mode))
    /* It is already taken into account in ira-costs.c.  */
    return false;
  index = ira_class_hard_reg_index[cover_class][allocno_preferenced_hard_regno];
  if (index < 0)
    /* Can not be tied.  It is not in the cover class.  */
    return false;
  if (HARD_REGISTER_P (reg1))
    cost = ira_get_register_move_cost (mode, cover_class, rclass) * freq;
  else
    cost = ira_get_register_move_cost (mode, rclass, cover_class) * freq;
  for (;;)
    {
      ira_allocate_and_set_costs
	(&ALLOCNO_HARD_REG_COSTS (a), cover_class,
	 ALLOCNO_COVER_CLASS_COST (a));
      ira_allocate_and_set_costs
	(&ALLOCNO_CONFLICT_HARD_REG_COSTS (a), cover_class, 0);
      ALLOCNO_HARD_REG_COSTS (a)[index] -= cost;
      ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index] -= cost;
      if (ALLOCNO_HARD_REG_COSTS (a)[index] < ALLOCNO_COVER_CLASS_COST (a))
	ALLOCNO_COVER_CLASS_COST (a) = ALLOCNO_HARD_REG_COSTS (a)[index];
      if (ALLOCNO_CAP (a) != NULL)
	a = ALLOCNO_CAP (a);
      else if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL
	       || (a = parent->regno_allocno_map[ALLOCNO_REGNO (a)]) == NULL)
	break;
    }
  return true;
}

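/* Example (illustrative): for a move insn (set (reg:SI 100) (reg:SI 1))
   where hard register 1 is in the cover class of pseudo 100, the
   function decreases the recorded cost of hard register 1 for the
   allocno of pseudo 100 (and for its caps and parent allocnos) by the
   register move cost scaled by FREQ; for a move between two pseudos
   it instead creates an allocno copy, provided the two allocnos do
   not already conflict.  */
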
/* Process all output registers of the current insn which are not
   bound (BOUND_P) and the input register REG (with operand number
   OP_NUM) which dies in the insn, as if there were a move insn
   between each such pair with frequency FREQ.  */
static void
process_reg_shuffles (rtx reg, int op_num, int freq, bool *bound_p)
{
  int i;
  rtx another_reg;

  gcc_assert (REG_SUBREG_P (reg));
  for (i = 0; i < recog_data.n_operands; i++)
    {
      another_reg = recog_data.operand[i];

      if (!REG_SUBREG_P (another_reg) || op_num == i
	  || recog_data.operand_type[i] != OP_OUT
	  || bound_p[i])
	continue;

      process_regs_for_copy (reg, another_reg, false, NULL_RTX, freq);
    }
}

/* Process INSN and create allocno copies if necessary.  For example,
   it might be needed because INSN is a pseudo-register move or a
   two-operand insn.  */
static void
add_insn_allocno_copies (rtx insn)
{
  rtx set, operand, dup, link;
  const char *str;
  bool commut_p, bound_p[MAX_RECOG_OPERANDS];
  int i, j, n, freq;

  freq = REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn));
  if (freq == 0)
    freq = 1;
  if ((set = single_set (insn)) != NULL_RTX
      && REG_SUBREG_P (SET_DEST (set)) && REG_SUBREG_P (SET_SRC (set))
      && ! side_effects_p (set)
      && find_reg_note (insn, REG_DEAD,
			REG_P (SET_SRC (set))
			? SET_SRC (set)
			: SUBREG_REG (SET_SRC (set))) != NULL_RTX)
    {
      process_regs_for_copy (SET_DEST (set), SET_SRC (set), false, insn, freq);
      return;
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_DEAD)
      break;
  if (! link)
    return;
  extract_insn (insn);
  for (i = 0; i < recog_data.n_operands; i++)
    bound_p[i] = false;
  for (i = 0; i < recog_data.n_operands; i++)
    {
      operand = recog_data.operand[i];
      if (! REG_SUBREG_P (operand))
	continue;
      str = recog_data.constraints[i];
      while (*str == ' ' || *str == '\t')
	str++;
      for (j = 0, commut_p = false; j < 2; j++, commut_p = true)
	if ((n = get_dup_num (i, commut_p)) >= 0)
	  {
	    bound_p[n] = true;
	    dup = recog_data.operand[n];
	    if (REG_SUBREG_P (dup)
		&& find_reg_note (insn, REG_DEAD,
				  REG_P (operand)
				  ? operand
				  : SUBREG_REG (operand)) != NULL_RTX)
	      process_regs_for_copy (operand, dup, true, NULL_RTX, freq);
	  }
    }
  for (i = 0; i < recog_data.n_operands; i++)
    {
      operand = recog_data.operand[i];
      if (REG_SUBREG_P (operand)
	  && find_reg_note (insn, REG_DEAD,
			    REG_P (operand)
			    ? operand : SUBREG_REG (operand)) != NULL_RTX)
	/* If an operand dies, prefer its hard register for the output
	   operands by decreasing the hard register cost or creating
	   the corresponding allocno copies.  The cost will not
	   correspond to a real move insn cost, so make the frequency
	   smaller.  */
	process_reg_shuffles (operand, i, freq < 8 ? 1 : freq / 8, bound_p);
    }
}

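/* Example (illustrative): in a typical two-operand add pattern where
   an input operand's constraint is "%0" (it must match the output),
   get_dup_num pairs that input with the output operand; if the input
   pseudo dies in the insn, an allocno copy between the two pseudos is
   created so the allocator tries to give them the same hard register
   and the insn needs no reload for the matching constraint.  */
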
/* Add copies originating from the BB given by LOOP_TREE_NODE.  */
static void
add_copies (ira_loop_tree_node_t loop_tree_node)
{
  basic_block bb;
  rtx insn;

  bb = loop_tree_node->bb;
  if (bb == NULL)
    return;
  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      add_insn_allocno_copies (insn);
}

/* Propagate copies to the corresponding allocnos on the upper loop
   tree level.  */
static void
propagate_copies (void)
{
  ira_copy_t cp;
  ira_copy_iterator ci;
  ira_allocno_t a1, a2, parent_a1, parent_a2;
  ira_loop_tree_node_t parent;

  FOR_EACH_COPY (cp, ci)
    {
      a1 = cp->first;
      a2 = cp->second;
      if (ALLOCNO_LOOP_TREE_NODE (a1) == ira_loop_tree_root)
	continue;
      ira_assert ((ALLOCNO_LOOP_TREE_NODE (a2) != ira_loop_tree_root));
      parent = ALLOCNO_LOOP_TREE_NODE (a1)->parent;
      if ((parent_a1 = ALLOCNO_CAP (a1)) == NULL)
	parent_a1 = parent->regno_allocno_map[ALLOCNO_REGNO (a1)];
      if ((parent_a2 = ALLOCNO_CAP (a2)) == NULL)
	parent_a2 = parent->regno_allocno_map[ALLOCNO_REGNO (a2)];
      ira_assert (parent_a1 != NULL && parent_a2 != NULL);
      if (! CONFLICT_ALLOCNO_P (parent_a1, parent_a2))
	ira_add_allocno_copy (parent_a1, parent_a2, cp->freq,
			      cp->constraint_p, cp->insn, cp->loop_tree_node);
    }
}

/* Array used to collect all conflict allocnos for a given allocno.  */
static ira_allocno_t *collected_conflict_allocnos;

/* Build conflict vectors or bit conflict vectors (whatever is more
   profitable) for allocno A from the conflict table and propagate the
   conflicts to the upper level allocno.  */
static void
build_allocno_conflicts (ira_allocno_t a)
{
  int i, px, parent_num;
  int conflict_bit_vec_words_num;
  ira_loop_tree_node_t parent;
  ira_allocno_t parent_a, another_a, another_parent_a;
  ira_allocno_t *vec;
  IRA_INT_TYPE *allocno_conflicts;
  ira_allocno_set_iterator asi;

  allocno_conflicts = conflicts[ALLOCNO_NUM (a)];
  px = 0;
  FOR_EACH_ALLOCNO_IN_SET (allocno_conflicts,
			   ALLOCNO_MIN (a), ALLOCNO_MAX (a), i, asi)
    {
      another_a = ira_conflict_id_allocno_map[i];
      ira_assert (ira_reg_classes_intersect_p
		  [ALLOCNO_COVER_CLASS (a)][ALLOCNO_COVER_CLASS (another_a)]);
      collected_conflict_allocnos[px++] = another_a;
    }
  if (ira_conflict_vector_profitable_p (a, px))
    {
      ira_allocate_allocno_conflict_vec (a, px);
      vec = (ira_allocno_t*) ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a);
      memcpy (vec, collected_conflict_allocnos, sizeof (ira_allocno_t) * px);
      vec[px] = NULL;
      ALLOCNO_CONFLICT_ALLOCNOS_NUM (a) = px;
    }
  else
    {
      ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a) = conflicts[ALLOCNO_NUM (a)];
      if (ALLOCNO_MAX (a) < ALLOCNO_MIN (a))
	conflict_bit_vec_words_num = 0;
      else
	conflict_bit_vec_words_num
	  = ((ALLOCNO_MAX (a) - ALLOCNO_MIN (a) + IRA_INT_BITS)
	     / IRA_INT_BITS);
      ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE (a)
	= conflict_bit_vec_words_num * sizeof (IRA_INT_TYPE);
    }
  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  if ((parent_a = ALLOCNO_CAP (a)) == NULL
      && (parent == NULL
	  || (parent_a = parent->regno_allocno_map[ALLOCNO_REGNO (a)])
	  == NULL))
    return;
  ira_assert (parent != NULL);
  ira_assert (ALLOCNO_COVER_CLASS (a) == ALLOCNO_COVER_CLASS (parent_a));
  parent_num = ALLOCNO_NUM (parent_a);
  FOR_EACH_ALLOCNO_IN_SET (allocno_conflicts,
			   ALLOCNO_MIN (a), ALLOCNO_MAX (a), i, asi)
    {
      another_a = ira_conflict_id_allocno_map[i];
      ira_assert (ira_reg_classes_intersect_p
		  [ALLOCNO_COVER_CLASS (a)][ALLOCNO_COVER_CLASS (another_a)]);
      if ((another_parent_a = ALLOCNO_CAP (another_a)) == NULL
	  && (another_parent_a = (parent->regno_allocno_map
				  [ALLOCNO_REGNO (another_a)])) == NULL)
	continue;
      ira_assert (ALLOCNO_NUM (another_parent_a) >= 0);
      ira_assert (ALLOCNO_COVER_CLASS (another_a)
		  == ALLOCNO_COVER_CLASS (another_parent_a));
      SET_ALLOCNO_SET_BIT (conflicts[parent_num],
			   ALLOCNO_CONFLICT_ID (another_parent_a),
			   ALLOCNO_MIN (parent_a),
			   ALLOCNO_MAX (parent_a));
    }
}

/* Build conflict vectors or bit conflict vectors (whatever is more
   profitable) of all allocnos from the conflict table.  */
static void
build_conflicts (void)
{
  int i;
  ira_allocno_t a, cap;

  collected_conflict_allocnos
    = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
				      * ira_allocnos_num);
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
	build_allocno_conflicts (a);
	for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
	  build_allocno_conflicts (cap);
      }
  ira_free (collected_conflict_allocnos);
}

\f

/* Print hard reg set SET with TITLE to FILE.  */
static void
print_hard_reg_set (FILE *file, const char *title, HARD_REG_SET set)
{
  int i, start;

  fputs (title, file);
  for (start = -1, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      if (TEST_HARD_REG_BIT (set, i))
	{
	  if (i == 0 || ! TEST_HARD_REG_BIT (set, i - 1))
	    start = i;
	}
      if (start >= 0
	  && (i == FIRST_PSEUDO_REGISTER - 1 || ! TEST_HARD_REG_BIT (set, i)))
	{
	  if (start == i - 1)
	    fprintf (file, " %d", start);
	  else if (start == i - 2)
	    fprintf (file, " %d %d", start, start + 1);
	  else
	    fprintf (file, " %d-%d", start, i - 1);
	  start = -1;
	}
    }
  putc ('\n', file);
}

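/* Example (illustrative): if SET contains hard registers 0, 1, 2 and
   5, the output after TITLE is " 0-2 5"; a pair of adjacent registers
   such as 8 and 9 is printed as " 8 9" rather than as a range.  */
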
/* Print information about allocno conflicts (or only regno conflicts,
   if REG_P) to FILE.  */
static void
print_conflicts (FILE *file, bool reg_p)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  HARD_REG_SET conflicting_hard_regs;

  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_allocno_t conflict_a;
      ira_allocno_conflict_iterator aci;
      basic_block bb;

      if (reg_p)
	fprintf (file, ";; r%d", ALLOCNO_REGNO (a));
      else
	{
	  fprintf (file, ";; a%d(r%d,", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
	  if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
	    fprintf (file, "b%d", bb->index);
	  else
	    fprintf (file, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop->num);
	  putc (')', file);
	}
      fputs (" conflicts:", file);
      if (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a) != NULL)
	FOR_EACH_ALLOCNO_CONFLICT (a, conflict_a, aci)
	  {
	    if (reg_p)
	      fprintf (file, " r%d,", ALLOCNO_REGNO (conflict_a));
	    else
	      {
		fprintf (file, " a%d(r%d,", ALLOCNO_NUM (conflict_a),
			 ALLOCNO_REGNO (conflict_a));
		if ((bb = ALLOCNO_LOOP_TREE_NODE (conflict_a)->bb) != NULL)
		  fprintf (file, "b%d)", bb->index);
		else
		  fprintf (file, "l%d)",
			   ALLOCNO_LOOP_TREE_NODE (conflict_a)->loop->num);
	      }
	  }
      COPY_HARD_REG_SET (conflicting_hard_regs,
			 ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
      AND_COMPL_HARD_REG_SET (conflicting_hard_regs, ira_no_alloc_regs);
      AND_HARD_REG_SET (conflicting_hard_regs,
			reg_class_contents[ALLOCNO_COVER_CLASS (a)]);
      print_hard_reg_set (file, "\n;; total conflict hard regs:",
			  conflicting_hard_regs);
      COPY_HARD_REG_SET (conflicting_hard_regs,
			 ALLOCNO_CONFLICT_HARD_REGS (a));
      AND_COMPL_HARD_REG_SET (conflicting_hard_regs, ira_no_alloc_regs);
      AND_HARD_REG_SET (conflicting_hard_regs,
			reg_class_contents[ALLOCNO_COVER_CLASS (a)]);
      print_hard_reg_set (file, ";; conflict hard regs:",
			  conflicting_hard_regs);
    }
  putc ('\n', file);
}

/* Print information about allocno conflicts (or only regno conflicts,
   if REG_P) to stderr.  */
void
ira_debug_conflicts (bool reg_p)
{
  print_conflicts (stderr, reg_p);
}

\f

/* Entry function which builds allocno conflicts and allocno copies
   and accumulates some allocno info on upper level regions.  */
void
ira_build_conflicts (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  HARD_REG_SET temp_hard_reg_set;

  if (ira_conflicts_p)
    {
      ira_conflicts_p = build_conflict_bit_table ();
      if (ira_conflicts_p)
	{
	  build_conflicts ();
	  ira_traverse_loop_tree (true, ira_loop_tree_root, NULL, add_copies);
	  /* We need the finished conflict table for the subsequent call.  */
	  if (flag_ira_region == IRA_REGION_ALL
	      || flag_ira_region == IRA_REGION_MIXED)
	    propagate_copies ();
	  /* Now we can free memory for the conflict table (see function
	     build_allocno_conflicts for details).  */
	  FOR_EACH_ALLOCNO (a, ai)
	    {
	      if (ALLOCNO_CONFLICT_ALLOCNO_ARRAY (a)
		  != conflicts[ALLOCNO_NUM (a)])
		ira_free (conflicts[ALLOCNO_NUM (a)]);
	    }
	  ira_free (conflicts);
	}
    }
  if (! CLASS_LIKELY_SPILLED_P (base_reg_class (VOIDmode, ADDRESS, SCRATCH)))
    CLEAR_HARD_REG_SET (temp_hard_reg_set);
  else
    {
      COPY_HARD_REG_SET (temp_hard_reg_set,
			 reg_class_contents[base_reg_class (VOIDmode, ADDRESS, SCRATCH)]);
      AND_COMPL_HARD_REG_SET (temp_hard_reg_set, ira_no_alloc_regs);
      AND_HARD_REG_SET (temp_hard_reg_set, call_used_reg_set);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      reg_attrs *attrs;
      tree decl;

      if ((! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	  /* For debugging purposes don't put user defined variables in
	     callee-clobbered registers.  */
	  || (optimize == 0
	      && (attrs = REG_ATTRS (regno_reg_rtx[ALLOCNO_REGNO (a)])) != NULL
	      && (decl = attrs->decl) != NULL
	      && VAR_OR_FUNCTION_DECL_P (decl)
	      && ! DECL_ARTIFICIAL (decl)))
	{
	  IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
			    call_used_reg_set);
	  IOR_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (a),
			    call_used_reg_set);
	}
      else if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	{
	  IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
			    no_caller_save_reg_set);
	  IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a),
			    temp_hard_reg_set);
	  IOR_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (a),
			    no_caller_save_reg_set);
	  IOR_HARD_REG_SET (ALLOCNO_CONFLICT_HARD_REGS (a),
			    temp_hard_reg_set);
	}
    }
  if (optimize && ira_conflicts_p
      && internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_conflicts (ira_dump_file, false);
}