gcc/ira-costs.c
1 /* IRA hard register and memory cost calculation for allocnos or pseudos.
2 Copyright (C) 2006-2015 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "hash-table.h"
26 #include "hard-reg-set.h"
27 #include "rtl.h"
28 #include "symtab.h"
29 #include "hashtab.h"
30 #include "hash-set.h"
31 #include "vec.h"
32 #include "machmode.h"
33 #include "input.h"
34 #include "function.h"
35 #include "flags.h"
36 #include "statistics.h"
37 #include "double-int.h"
38 #include "real.h"
39 #include "fixed-value.h"
40 #include "alias.h"
41 #include "wide-int.h"
42 #include "inchash.h"
43 #include "tree.h"
44 #include "insn-config.h"
45 #include "expmed.h"
46 #include "dojump.h"
47 #include "explow.h"
48 #include "calls.h"
49 #include "emit-rtl.h"
50 #include "varasm.h"
51 #include "stmt.h"
52 #include "expr.h"
53 #include "tm_p.h"
54 #include "predict.h"
55 #include "dominance.h"
56 #include "cfg.h"
57 #include "basic-block.h"
58 #include "regs.h"
59 #include "addresses.h"
60 #include "recog.h"
61 #include "reload.h"
62 #include "diagnostic-core.h"
63 #include "target.h"
64 #include "params.h"
65 #include "ira-int.h"
66
67 /* The flag is set up every time we calculate pseudo register
68 classes through the function ira_set_pseudo_classes. */
69 static bool pseudo_classes_defined_p = false;
70
71 /* TRUE if we work with allocnos. Otherwise we work with pseudos. */
72 static bool allocno_p;
73
74 /* Number of elements in array `costs'. */
75 static int cost_elements_num;
76
77 /* The `costs' struct records the cost of using hard registers of each
78 class considered for the calculation and of using memory for each
79 allocno or pseudo. */
80 struct costs
81 {
82 int mem_cost;
83 /* Costs for register classes start here. We process only some
84 allocno classes. */
85 int cost[1];
86 };
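/* Note: cost[] above is declared with a single element, but each
   struct costs record is allocated with room for one entry per cost
   class; struct_costs_size (below) holds the real per-record size and
   the COSTS macro indexes the packed array of such records.  */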
87
88 #define max_struct_costs_size \
89 (this_target_ira_int->x_max_struct_costs_size)
90 #define init_cost \
91 (this_target_ira_int->x_init_cost)
92 #define temp_costs \
93 (this_target_ira_int->x_temp_costs)
94 #define op_costs \
95 (this_target_ira_int->x_op_costs)
96 #define this_op_costs \
97 (this_target_ira_int->x_this_op_costs)
98
99 /* Costs of each class for each allocno or pseudo. */
100 static struct costs *costs;
101
102 /* Accumulated costs of each class for each allocno. */
103 static struct costs *total_allocno_costs;
104
105 /* The current size of struct costs. */
106 static int struct_costs_size;
107
108 /* Return pointer to structure containing costs of allocno or pseudo
109 with given NUM in array ARR. */
110 #define COSTS(arr, num) \
111 ((struct costs *) ((char *) (arr) + (num) * struct_costs_size))
112
113 /* Return index in COSTS when processing reg with REGNO. */
114 #define COST_INDEX(regno) (allocno_p \
115 ? ALLOCNO_NUM (ira_curr_regno_allocno_map[regno]) \
116 : (int) regno)
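/* For example, COSTS (costs, COST_INDEX (regno)) is the variable-sized
   cost record for the allocno or pseudo of REGNO; the records are laid
   out back to back with stride struct_costs_size.  */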
117
118 /* Record register class preferences of each allocno or pseudo. A null
119 value means no preferences yet; this happens on the first iteration of
120 the cost calculation. */
121 static enum reg_class *pref;
122
123 /* Allocated buffer for pref. */
124 static enum reg_class *pref_buffer;
125
126 /* Record allocno class of each allocno with the same regno. */
127 static enum reg_class *regno_aclass;
128
129 /* Record cost gains for not allocating a register with an invariant
130 equivalence. */
131 static int *regno_equiv_gains;
132
133 /* Execution frequency of the current insn. */
134 static int frequency;
135
136 \f
137
138 /* Info about reg classes whose costs are calculated for a pseudo. */
139 struct cost_classes
140 {
141 /* Number of the cost classes in the subsequent array. */
142 int num;
143 /* Container of the cost classes. */
144 enum reg_class classes[N_REG_CLASSES];
145 /* Map reg class -> index of the reg class in the previous array.
146 -1 if it is not a cost class. */
147 int index[N_REG_CLASSES];
148 /* Map hard regno -> index of the first class in array CLASSES that
149 contains the hard regno, -1 otherwise. */
150 int hard_regno_index[FIRST_PSEUDO_REGISTER];
151 };
152
153 /* Types of pointers to the structure above. */
154 typedef struct cost_classes *cost_classes_t;
155 typedef const struct cost_classes *const_cost_classes_t;
156
157 /* Info about cost classes for each pseudo. */
158 static cost_classes_t *regno_cost_classes;
159
160 /* Helper for cost_classes hashing. */
161
162 struct cost_classes_hasher
163 {
164 typedef cost_classes value_type;
165 typedef cost_classes compare_type;
166 static inline hashval_t hash (const value_type *);
167 static inline bool equal (const value_type *, const compare_type *);
168 static inline void remove (value_type *);
169 };
170
171 /* Returns hash value for cost classes info HV. */
172 inline hashval_t
173 cost_classes_hasher::hash (const value_type *hv)
174 {
175 return iterative_hash (&hv->classes, sizeof (enum reg_class) * hv->num, 0);
176 }
177
178 /* Compares cost classes info HV1 and HV2. */
179 inline bool
180 cost_classes_hasher::equal (const value_type *hv1, const compare_type *hv2)
181 {
182 return (hv1->num == hv2->num
183 && memcmp (hv1->classes, hv2->classes,
184 sizeof (enum reg_class) * hv1->num) == 0);
185 }
186
187 /* Delete cost classes info V from the hash table. */
188 inline void
189 cost_classes_hasher::remove (value_type *v)
190 {
191 ira_free (v);
192 }
193
194 /* Hash table of unique cost classes. */
195 static hash_table<cost_classes_hasher> *cost_classes_htab;
196
197 /* Map allocno class -> cost classes for pseudo of given allocno
198 class. */
199 static cost_classes_t cost_classes_aclass_cache[N_REG_CLASSES];
200
201 /* Map mode -> cost classes for pseudo of given mode. */
202 static cost_classes_t cost_classes_mode_cache[MAX_MACHINE_MODE];
203
204 /* Cost classes that include all classes in ira_important_classes. */
205 static cost_classes all_cost_classes;
206
207 /* Use the array of classes in CLASSES_PTR to fill out the rest of
208 the structure. */
209 static void
210 complete_cost_classes (cost_classes_t classes_ptr)
211 {
212 for (int i = 0; i < N_REG_CLASSES; i++)
213 classes_ptr->index[i] = -1;
214 for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
215 classes_ptr->hard_regno_index[i] = -1;
216 for (int i = 0; i < classes_ptr->num; i++)
217 {
218 enum reg_class cl = classes_ptr->classes[i];
219 classes_ptr->index[cl] = i;
220 for (int j = ira_class_hard_regs_num[cl] - 1; j >= 0; j--)
221 {
222 unsigned int hard_regno = ira_class_hard_regs[cl][j];
223 if (classes_ptr->hard_regno_index[hard_regno] < 0)
224 classes_ptr->hard_regno_index[hard_regno] = i;
225 }
226 }
227 }
228
229 /* Initialize info about the cost classes for each pseudo. */
230 static void
231 initiate_regno_cost_classes (void)
232 {
233 int size = sizeof (cost_classes_t) * max_reg_num ();
234
235 regno_cost_classes = (cost_classes_t *) ira_allocate (size);
236 memset (regno_cost_classes, 0, size);
237 memset (cost_classes_aclass_cache, 0,
238 sizeof (cost_classes_t) * N_REG_CLASSES);
239 memset (cost_classes_mode_cache, 0,
240 sizeof (cost_classes_t) * MAX_MACHINE_MODE);
241 cost_classes_htab = new hash_table<cost_classes_hasher> (200);
242 all_cost_classes.num = ira_important_classes_num;
243 for (int i = 0; i < ira_important_classes_num; i++)
244 all_cost_classes.classes[i] = ira_important_classes[i];
245 complete_cost_classes (&all_cost_classes);
246 }
247
248 /* Create new cost classes from cost classes FROM and set up members
249 index and hard_regno_index. Return the new classes. The function
250 factors out the code common to the two functions
251 setup_regno_cost_classes_by_aclass and
252 setup_regno_cost_classes_by_mode. */
253 static cost_classes_t
254 setup_cost_classes (cost_classes_t from)
255 {
256 cost_classes_t classes_ptr;
257
258 classes_ptr = (cost_classes_t) ira_allocate (sizeof (struct cost_classes));
259 classes_ptr->num = from->num;
260 for (int i = 0; i < from->num; i++)
261 classes_ptr->classes[i] = from->classes[i];
262 complete_cost_classes (classes_ptr);
263 return classes_ptr;
264 }
265
266 /* Return a version of FULL that only considers registers in REGS that are
267 valid for mode MODE. Both FULL and the returned class are globally
268 allocated. */
269 static cost_classes_t
270 restrict_cost_classes (cost_classes_t full, machine_mode mode,
271 const HARD_REG_SET &regs)
272 {
273 static struct cost_classes narrow;
274 int map[N_REG_CLASSES];
275 narrow.num = 0;
276 for (int i = 0; i < full->num; i++)
277 {
278 /* Assume that we'll drop the class. */
279 map[i] = -1;
280
281 /* Ignore classes that are too small for the mode. */
282 enum reg_class cl = full->classes[i];
283 if (!contains_reg_of_mode[cl][mode])
284 continue;
285
286 /* Calculate the set of registers in CL that belong to REGS and
287 are valid for MODE. */
288 HARD_REG_SET valid_for_cl;
289 COPY_HARD_REG_SET (valid_for_cl, reg_class_contents[cl]);
290 AND_HARD_REG_SET (valid_for_cl, regs);
291 AND_COMPL_HARD_REG_SET (valid_for_cl,
292 ira_prohibited_class_mode_regs[cl][mode]);
293 AND_COMPL_HARD_REG_SET (valid_for_cl, ira_no_alloc_regs);
294 if (hard_reg_set_empty_p (valid_for_cl))
295 continue;
296
297 /* Don't use this class if the set of valid registers is a subset
298 of an existing class. For example, suppose we have two classes
299 GR_REGS and FR_REGS and a union class GR_AND_FR_REGS. Suppose
300 that the mode changes allowed by FR_REGS are not as general as
301 the mode changes allowed by GR_REGS.
302
303 In this situation, the mode changes for GR_AND_FR_REGS could
304 either be seen as the union or the intersection of the mode
305 changes allowed by the two subclasses. The justification for
306 the union-based definition would be that, if you want a mode
307 change that's only allowed by GR_REGS, you can pick a register
308 from the GR_REGS subclass. The justification for the
309 intersection-based definition would be that every register
310 from the class would allow the mode change.
311
312 However, if we have a register that needs to be in GR_REGS,
313 using GR_AND_FR_REGS with the intersection-based definition
314 would be too pessimistic, since it would bring in restrictions
315 that only apply to FR_REGS. Conversely, if we have a register
316 that needs to be in FR_REGS, using GR_AND_FR_REGS with the
317 union-based definition would lose the extra restrictions
318 placed on FR_REGS. GR_AND_FR_REGS is therefore only useful
319 for cases where GR_REGS and FR_REGS are both valid. */
320 int pos;
321 for (pos = 0; pos < narrow.num; ++pos)
322 {
323 enum reg_class cl2 = narrow.classes[pos];
324 if (hard_reg_set_subset_p (valid_for_cl, reg_class_contents[cl2]))
325 break;
326 }
327 map[i] = pos;
328 if (pos == narrow.num)
329 {
330 /* If several classes are equivalent, prefer to use the one
331 that was chosen as the allocno class. */
332 enum reg_class cl2 = ira_allocno_class_translate[cl];
333 if (ira_class_hard_regs_num[cl] == ira_class_hard_regs_num[cl2])
334 cl = cl2;
335 narrow.classes[narrow.num++] = cl;
336 }
337 }
338 if (narrow.num == full->num)
339 return full;
340
341 cost_classes **slot = cost_classes_htab->find_slot (&narrow, INSERT);
342 if (*slot == NULL)
343 {
344 cost_classes_t classes = setup_cost_classes (&narrow);
345 /* Map equivalent classes to the representative that we chose above. */
346 for (int i = 0; i < ira_important_classes_num; i++)
347 {
348 enum reg_class cl = ira_important_classes[i];
349 int index = full->index[cl];
350 if (index >= 0)
351 classes->index[cl] = map[index];
352 }
353 *slot = classes;
354 }
355 return *slot;
356 }
357
358 /* Set up cost classes for pseudo REGNO whose allocno class is ACLASS.
359 This function is used when we already know an initial approximation
360 of the pseudo's allocno class, e.g. on the second iteration of class
361 cost calculation or after class cost calculation in register-pressure
362 sensitive insn scheduling or register-pressure sensitive
363 loop-invariant motion. */
364 static void
365 setup_regno_cost_classes_by_aclass (int regno, enum reg_class aclass)
366 {
367 static struct cost_classes classes;
368 cost_classes_t classes_ptr;
369 enum reg_class cl;
370 int i;
371 cost_classes **slot;
372 HARD_REG_SET temp, temp2;
373 bool exclude_p;
374
375 if ((classes_ptr = cost_classes_aclass_cache[aclass]) == NULL)
376 {
377 COPY_HARD_REG_SET (temp, reg_class_contents[aclass]);
378 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
379 /* We exclude from consideration classes which are subsets of
380 ACLASS only if ACLASS is a uniform class. */
381 exclude_p = ira_uniform_class_p[aclass];
382 classes.num = 0;
383 for (i = 0; i < ira_important_classes_num; i++)
384 {
385 cl = ira_important_classes[i];
386 if (exclude_p)
387 {
388 /* Exclude non-uniform classes which are subsets of
389 ACLASS. */
390 COPY_HARD_REG_SET (temp2, reg_class_contents[cl]);
391 AND_COMPL_HARD_REG_SET (temp2, ira_no_alloc_regs);
392 if (hard_reg_set_subset_p (temp2, temp) && cl != aclass)
393 continue;
394 }
395 classes.classes[classes.num++] = cl;
396 }
397 slot = cost_classes_htab->find_slot (&classes, INSERT);
398 if (*slot == NULL)
399 {
400 classes_ptr = setup_cost_classes (&classes);
401 *slot = classes_ptr;
402 }
403 classes_ptr = cost_classes_aclass_cache[aclass] = (cost_classes_t) *slot;
404 }
405 if (regno_reg_rtx[regno] != NULL_RTX)
406 {
407 /* Restrict the classes to those that are valid for REGNO's mode
408 (which might for example exclude singleton classes if the mode
409 requires two registers). Also restrict the classes to those that
410 are valid for subregs of REGNO. */
411 const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno);
412 if (!valid_regs)
413 valid_regs = &reg_class_contents[ALL_REGS];
414 classes_ptr = restrict_cost_classes (classes_ptr,
415 PSEUDO_REGNO_MODE (regno),
416 *valid_regs);
417 }
418 regno_cost_classes[regno] = classes_ptr;
419 }
420
421 /* Set up cost classes for pseudo REGNO with MODE. Using MODE can
422 decrease the number of cost classes for the pseudo if hard registers
423 of some important classes cannot hold a value of MODE. The pseudo
424 then cannot get a hard register of those classes, and computing
425 costs for such classes would only waste CPU
426 time. */
427 static void
428 setup_regno_cost_classes_by_mode (int regno, machine_mode mode)
429 {
430 if (const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno))
431 regno_cost_classes[regno] = restrict_cost_classes (&all_cost_classes,
432 mode, *valid_regs);
433 else
434 {
435 if (cost_classes_mode_cache[mode] == NULL)
436 cost_classes_mode_cache[mode]
437 = restrict_cost_classes (&all_cost_classes, mode,
438 reg_class_contents[ALL_REGS]);
439 regno_cost_classes[regno] = cost_classes_mode_cache[mode];
440 }
441 }
442
443 /* Finalize info about the cost classes for each pseudo. */
444 static void
445 finish_regno_cost_classes (void)
446 {
447 ira_free (regno_cost_classes);
448 delete cost_classes_htab;
449 cost_classes_htab = NULL;
450 }
451
452 \f
453
454 /* Compute the cost of loading X into (if TO_P is TRUE) or from (if
455 TO_P is FALSE) a register of class RCLASS in mode MODE. X must not
456 be a pseudo register. */
457 static int
458 copy_cost (rtx x, machine_mode mode, reg_class_t rclass, bool to_p,
459 secondary_reload_info *prev_sri)
460 {
461 secondary_reload_info sri;
462 reg_class_t secondary_class = NO_REGS;
463
464 /* If X is a SCRATCH, there is actually nothing to move since we are
465 assuming optimal allocation. */
466 if (GET_CODE (x) == SCRATCH)
467 return 0;
468
469 /* Get the class we will actually use for a reload. */
470 rclass = targetm.preferred_reload_class (x, rclass);
471
472 /* If we need a secondary reload for an intermediate, the cost is
473 that to load the input into the intermediate register, then to
474 copy it. */
475 sri.prev_sri = prev_sri;
476 sri.extra_cost = 0;
477 secondary_class = targetm.secondary_reload (to_p, x, rclass, mode, &sri);
478
479 if (secondary_class != NO_REGS)
480 {
481 ira_init_register_move_cost_if_necessary (mode);
482 return (ira_register_move_cost[mode][(int) secondary_class][(int) rclass]
483 + sri.extra_cost
484 + copy_cost (x, mode, secondary_class, to_p, &sri));
485 }
486
487 /* For memory, use the memory move cost; for (hard) registers, use
488 the cost to move between the register classes; and use the cost of
489 one insn for everything else (constants). */
490 if (MEM_P (x) || rclass == NO_REGS)
491 return sri.extra_cost
492 + ira_memory_move_cost[mode][(int) rclass][to_p != 0];
493 else if (REG_P (x))
494 {
495 reg_class_t x_class = REGNO_REG_CLASS (REGNO (x));
496
497 ira_init_register_move_cost_if_necessary (mode);
498 return (sri.extra_cost
499 + ira_register_move_cost[mode][(int) x_class][(int) rclass]);
500 }
501 else
502 /* If this is a constant, we may eventually want to call rtx_cost
503 here. */
504 return sri.extra_cost + COSTS_N_INSNS (1);
505 }
506
507 \f
508
509 /* Record the cost of using memory or hard registers of various
510 classes for the operands in INSN.
511
512 N_ALTS is the number of alternatives.
513 N_OPS is the number of operands.
514 OPS is an array of the operands.
515 MODES are the modes of the operands, in case any are VOIDmode.
516 CONSTRAINTS are the constraints to use for the operands. This array
517 is modified by this procedure.
518
519 This procedure works alternative by alternative. For each
520 alternative we assume that we will be able to allocate all allocnos
521 to their ideal register class and calculate the cost of using that
522 alternative. Then we compute, for each operand that is a
523 pseudo-register, the cost of having the allocno allocated to each
524 register class and using it in that alternative. To this cost is
525 added the cost of the alternative.
526
527 The cost of each class for this insn is its lowest cost among all
528 the alternatives. */
529 static void
530 record_reg_classes (int n_alts, int n_ops, rtx *ops,
531 machine_mode *modes, const char **constraints,
532 rtx_insn *insn, enum reg_class *pref)
533 {
534 int alt;
535 int i, j, k;
536 int insn_allows_mem[MAX_RECOG_OPERANDS];
537 move_table *move_in_cost, *move_out_cost;
538 short (*mem_cost)[2];
539
540 for (i = 0; i < n_ops; i++)
541 insn_allows_mem[i] = 0;
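/* insn_allows_mem[i] will be set if at least one alternative allows
   operand I to be in memory; operands for which it stays clear make
   their allocnos bad spill candidates (see the end of this
   function).  */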
542
543 /* Process each alternative, each time updating each operand's cost
544 to the minimum of its current cost and its cost in that alternative. */
545 alternative_mask preferred = get_preferred_alternatives (insn);
546 for (alt = 0; alt < n_alts; alt++)
547 {
548 enum reg_class classes[MAX_RECOG_OPERANDS];
549 int allows_mem[MAX_RECOG_OPERANDS];
550 enum reg_class rclass;
551 int alt_fail = 0;
552 int alt_cost = 0, op_cost_add;
553
554 if (!TEST_BIT (preferred, alt))
555 {
556 for (i = 0; i < recog_data.n_operands; i++)
557 constraints[i] = skip_alternative (constraints[i]);
558
559 continue;
560 }
561
562 for (i = 0; i < n_ops; i++)
563 {
564 unsigned char c;
565 const char *p = constraints[i];
566 rtx op = ops[i];
567 machine_mode mode = modes[i];
568 int allows_addr = 0;
569 int win = 0;
570
571 /* Initially show we know nothing about the register class. */
572 classes[i] = NO_REGS;
573 allows_mem[i] = 0;
574
575 /* If this operand has no constraints at all, we can
576 conclude nothing about it since anything is valid. */
577 if (*p == 0)
578 {
579 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
580 memset (this_op_costs[i], 0, struct_costs_size);
581 continue;
582 }
583
584 /* If this alternative is only relevant when this operand
585 matches a previous operand, we do different things
586 depending on whether this operand is an allocno-reg or not.
587 We must process any modifiers for the operand before we
588 can make this test. */
589 while (*p == '%' || *p == '=' || *p == '+' || *p == '&')
590 p++;
591
592 if (p[0] >= '0' && p[0] <= '0' + i && (p[1] == ',' || p[1] == 0))
593 {
594 /* Copy class and whether memory is allowed from the
595 matching alternative. Then perform any needed cost
596 computations and/or adjustments. */
597 j = p[0] - '0';
598 classes[i] = classes[j];
599 allows_mem[i] = allows_mem[j];
600 if (allows_mem[i])
601 insn_allows_mem[i] = 1;
602
603 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
604 {
605 /* If this matches the other operand, we have no
606 added cost and we win. */
607 if (rtx_equal_p (ops[j], op))
608 win = 1;
609 /* If we can put the other operand into a register,
610 add to the cost of this alternative the cost to
611 copy this operand to the register used for the
612 other operand. */
613 else if (classes[j] != NO_REGS)
614 {
615 alt_cost += copy_cost (op, mode, classes[j], 1, NULL);
616 win = 1;
617 }
618 }
619 else if (! REG_P (ops[j])
620 || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER)
621 {
622 /* This op is an allocno but the one it matches is
623 not. */
624
625 /* If we can't put the other operand into a
626 register, this alternative can't be used. */
627
628 if (classes[j] == NO_REGS)
629 alt_fail = 1;
630 /* Otherwise, add to the cost of this alternative
631 the cost to copy the other operand to the hard
632 register used for this operand. */
633 else
634 alt_cost += copy_cost (ops[j], mode, classes[j], 1, NULL);
635 }
636 else
637 {
638 /* The costs of this operand are not the same as the
639 other operand since move costs are not symmetric.
640 Moreover, if we cannot tie them, this alternative
641 needs to do a copy, which is one insn. */
642 struct costs *pp = this_op_costs[i];
643 int *pp_costs = pp->cost;
644 cost_classes_t cost_classes_ptr
645 = regno_cost_classes[REGNO (op)];
646 enum reg_class *cost_classes = cost_classes_ptr->classes;
647 bool in_p = recog_data.operand_type[i] != OP_OUT;
648 bool out_p = recog_data.operand_type[i] != OP_IN;
649 enum reg_class op_class = classes[i];
650
651 ira_init_register_move_cost_if_necessary (mode);
652 if (! in_p)
653 {
654 ira_assert (out_p);
655 if (op_class == NO_REGS)
656 {
657 mem_cost = ira_memory_move_cost[mode];
658 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
659 {
660 rclass = cost_classes[k];
661 pp_costs[k] = mem_cost[rclass][0] * frequency;
662 }
663 }
664 else
665 {
666 move_out_cost = ira_may_move_out_cost[mode];
667 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
668 {
669 rclass = cost_classes[k];
670 pp_costs[k]
671 = move_out_cost[op_class][rclass] * frequency;
672 }
673 }
674 }
675 else if (! out_p)
676 {
677 ira_assert (in_p);
678 if (op_class == NO_REGS)
679 {
680 mem_cost = ira_memory_move_cost[mode];
681 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
682 {
683 rclass = cost_classes[k];
684 pp_costs[k] = mem_cost[rclass][1] * frequency;
685 }
686 }
687 else
688 {
689 move_in_cost = ira_may_move_in_cost[mode];
690 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
691 {
692 rclass = cost_classes[k];
693 pp_costs[k]
694 = move_in_cost[rclass][op_class] * frequency;
695 }
696 }
697 }
698 else
699 {
700 if (op_class == NO_REGS)
701 {
702 mem_cost = ira_memory_move_cost[mode];
703 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
704 {
705 rclass = cost_classes[k];
706 pp_costs[k] = ((mem_cost[rclass][0]
707 + mem_cost[rclass][1])
708 * frequency);
709 }
710 }
711 else
712 {
713 move_in_cost = ira_may_move_in_cost[mode];
714 move_out_cost = ira_may_move_out_cost[mode];
715 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
716 {
717 rclass = cost_classes[k];
718 pp_costs[k] = ((move_in_cost[rclass][op_class]
719 + move_out_cost[op_class][rclass])
720 * frequency);
721 }
722 }
723 }
724
725 /* If the alternative actually allows memory, make
726 things a bit cheaper since we won't need an extra
727 insn to load it. */
728 pp->mem_cost
729 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
730 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
731 - allows_mem[i]) * frequency;
732
733 /* If we have assigned a class to this allocno in
734 our first pass, add a cost to this alternative
735 corresponding to what we would add if this
736 allocno were not in the appropriate class. */
737 if (pref)
738 {
739 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
740
741 if (pref_class == NO_REGS)
742 alt_cost
743 += ((out_p
744 ? ira_memory_move_cost[mode][op_class][0] : 0)
745 + (in_p
746 ? ira_memory_move_cost[mode][op_class][1]
747 : 0));
748 else if (ira_reg_class_intersect
749 [pref_class][op_class] == NO_REGS)
750 alt_cost
751 += ira_register_move_cost[mode][pref_class][op_class];
752 }
753 if (REGNO (ops[i]) != REGNO (ops[j])
754 && ! find_reg_note (insn, REG_DEAD, op))
755 alt_cost += 2;
756
757 /* This is in place of ordinary cost computation for
758 this operand, so skip to the end of the
759 alternative (should be just one character). */
760 while (*p && *p++ != ',')
761 ;
762
763 constraints[i] = p;
764 continue;
765 }
766 }
767
768 /* Scan all the constraint letters. See if the operand
769 matches any of the constraints. Collect the valid
770 register classes and see if this operand accepts
771 memory. */
772 while ((c = *p))
773 {
774 switch (c)
775 {
776 case '*':
777 /* Ignore the next letter for this pass. */
778 c = *++p;
779 break;
780
781 case '?':
782 alt_cost += 2;
783 break;
784
785 case 'g':
786 if (MEM_P (op)
787 || (CONSTANT_P (op)
788 && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))))
789 win = 1;
790 insn_allows_mem[i] = allows_mem[i] = 1;
791 classes[i] = ira_reg_class_subunion[classes[i]][GENERAL_REGS];
792 break;
793
794 default:
795 enum constraint_num cn = lookup_constraint (p);
796 enum reg_class cl;
797 switch (get_constraint_type (cn))
798 {
799 case CT_REGISTER:
800 cl = reg_class_for_constraint (cn);
801 if (cl != NO_REGS)
802 classes[i] = ira_reg_class_subunion[classes[i]][cl];
803 break;
804
805 case CT_CONST_INT:
806 if (CONST_INT_P (op)
807 && insn_const_int_ok_for_constraint (INTVAL (op), cn))
808 win = 1;
809 break;
810
811 case CT_MEMORY:
812 /* Every MEM can be reloaded to fit. */
813 insn_allows_mem[i] = allows_mem[i] = 1;
814 if (MEM_P (op))
815 win = 1;
816 break;
817
818 case CT_ADDRESS:
819 /* Every address can be reloaded to fit. */
820 allows_addr = 1;
821 if (address_operand (op, GET_MODE (op))
822 || constraint_satisfied_p (op, cn))
823 win = 1;
824 /* We know this operand is an address, so we
825 want it to be allocated to a hard register
826 that can be the base of an address,
827 i.e. BASE_REG_CLASS. */
828 classes[i]
829 = ira_reg_class_subunion[classes[i]]
830 [base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
831 ADDRESS, SCRATCH)];
832 break;
833
834 case CT_FIXED_FORM:
835 if (constraint_satisfied_p (op, cn))
836 win = 1;
837 break;
838 }
839 break;
840 }
841 p += CONSTRAINT_LEN (c, p);
842 if (c == ',')
843 break;
844 }
845
846 constraints[i] = p;
847
848 /* How we account for this operand now depends on whether it
849 is a pseudo register or not. If it is, we first check if
850 any register classes are valid. If not, we ignore this
851 alternative, since we want to assume that all allocnos get
852 allocated for register preferencing. If some register
853 class is valid, compute the costs of moving the allocno
854 into that class. */
855 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
856 {
857 if (classes[i] == NO_REGS && ! allows_mem[i])
858 {
859 /* We must always fail if the operand is a REG but
860 we did not find a suitable class and memory is
861 not allowed.
862
863 Otherwise we may perform an uninitialized read
864 from this_op_costs after the `continue' statement
865 below. */
866 alt_fail = 1;
867 }
868 else
869 {
870 unsigned int regno = REGNO (op);
871 struct costs *pp = this_op_costs[i];
872 int *pp_costs = pp->cost;
873 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
874 enum reg_class *cost_classes = cost_classes_ptr->classes;
875 bool in_p = recog_data.operand_type[i] != OP_OUT;
876 bool out_p = recog_data.operand_type[i] != OP_IN;
877 enum reg_class op_class = classes[i];
878
879 ira_init_register_move_cost_if_necessary (mode);
880 if (! in_p)
881 {
882 ira_assert (out_p);
883 if (op_class == NO_REGS)
884 {
885 mem_cost = ira_memory_move_cost[mode];
886 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
887 {
888 rclass = cost_classes[k];
889 pp_costs[k] = mem_cost[rclass][0] * frequency;
890 }
891 }
892 else
893 {
894 move_out_cost = ira_may_move_out_cost[mode];
895 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
896 {
897 rclass = cost_classes[k];
898 pp_costs[k]
899 = move_out_cost[op_class][rclass] * frequency;
900 }
901 }
902 }
903 else if (! out_p)
904 {
905 ira_assert (in_p);
906 if (op_class == NO_REGS)
907 {
908 mem_cost = ira_memory_move_cost[mode];
909 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
910 {
911 rclass = cost_classes[k];
912 pp_costs[k] = mem_cost[rclass][1] * frequency;
913 }
914 }
915 else
916 {
917 move_in_cost = ira_may_move_in_cost[mode];
918 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
919 {
920 rclass = cost_classes[k];
921 pp_costs[k]
922 = move_in_cost[rclass][op_class] * frequency;
923 }
924 }
925 }
926 else
927 {
928 if (op_class == NO_REGS)
929 {
930 mem_cost = ira_memory_move_cost[mode];
931 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
932 {
933 rclass = cost_classes[k];
934 pp_costs[k] = ((mem_cost[rclass][0]
935 + mem_cost[rclass][1])
936 * frequency);
937 }
938 }
939 else
940 {
941 move_in_cost = ira_may_move_in_cost[mode];
942 move_out_cost = ira_may_move_out_cost[mode];
943 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
944 {
945 rclass = cost_classes[k];
946 pp_costs[k] = ((move_in_cost[rclass][op_class]
947 + move_out_cost[op_class][rclass])
948 * frequency);
949 }
950 }
951 }
952
953 if (op_class == NO_REGS)
954 /* Although we don't need an insn to reload from
955 memory, accessing memory is still usually more
956 expensive than using a register. */
957 pp->mem_cost = frequency;
958 else
959 /* If the alternative actually allows memory, make
960 things a bit cheaper since we won't need an
961 extra insn to load it. */
962 pp->mem_cost
963 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
964 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
965 - allows_mem[i]) * frequency;
966 /* If we have assigned a class to this allocno in
967 our first pass, add a cost to this alternative
968 corresponding to what we would add if this
969 allocno were not in the appropriate class. */
970 if (pref)
971 {
972 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
973
974 if (pref_class == NO_REGS)
975 {
976 if (op_class != NO_REGS)
977 alt_cost
978 += ((out_p
979 ? ira_memory_move_cost[mode][op_class][0]
980 : 0)
981 + (in_p
982 ? ira_memory_move_cost[mode][op_class][1]
983 : 0));
984 }
985 else if (op_class == NO_REGS)
986 alt_cost
987 += ((out_p
988 ? ira_memory_move_cost[mode][pref_class][1]
989 : 0)
990 + (in_p
991 ? ira_memory_move_cost[mode][pref_class][0]
992 : 0));
993 else if (ira_reg_class_intersect[pref_class][op_class]
994 == NO_REGS)
995 alt_cost += (ira_register_move_cost
996 [mode][pref_class][op_class]);
997 }
998 }
999 }
1000
1001 /* Otherwise, if this alternative wins, either because we
1002 have already determined that or because we have a hard
1003 register of the proper class, there is no cost for this
1004 alternative. */
1005 else if (win || (REG_P (op)
1006 && reg_fits_class_p (op, classes[i],
1007 0, GET_MODE (op))))
1008 ;
1009
1010 /* If registers are valid, the cost of this alternative
1011 includes copying the object to and/or from a
1012 register. */
1013 else if (classes[i] != NO_REGS)
1014 {
1015 if (recog_data.operand_type[i] != OP_OUT)
1016 alt_cost += copy_cost (op, mode, classes[i], 1, NULL);
1017
1018 if (recog_data.operand_type[i] != OP_IN)
1019 alt_cost += copy_cost (op, mode, classes[i], 0, NULL);
1020 }
1021 /* The only other way this alternative can be used is if
1022 this is a constant that could be placed into memory. */
1023 else if (CONSTANT_P (op) && (allows_addr || allows_mem[i]))
1024 alt_cost += ira_memory_move_cost[mode][classes[i]][1];
1025 else
1026 alt_fail = 1;
1027 }
1028
1029 if (alt_fail)
1030 continue;
1031
1032 op_cost_add = alt_cost * frequency;
1033 /* Finally, update the costs with the information we've
1034 calculated about this alternative. */
1035 for (i = 0; i < n_ops; i++)
1036 if (REG_P (ops[i]) && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
1037 {
1038 struct costs *pp = op_costs[i], *qq = this_op_costs[i];
1039 int *pp_costs = pp->cost, *qq_costs = qq->cost;
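/* An in/out operand is both read and written, so weight this
   alternative's cost for it twice.  */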
1040 int scale = 1 + (recog_data.operand_type[i] == OP_INOUT);
1041 cost_classes_t cost_classes_ptr
1042 = regno_cost_classes[REGNO (ops[i])];
1043
1044 pp->mem_cost = MIN (pp->mem_cost,
1045 (qq->mem_cost + op_cost_add) * scale);
1046
1047 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1048 pp_costs[k]
1049 = MIN (pp_costs[k], (qq_costs[k] + op_cost_add) * scale);
1050 }
1051 }
1052
1053 if (allocno_p)
1054 for (i = 0; i < n_ops; i++)
1055 {
1056 ira_allocno_t a;
1057 rtx op = ops[i];
1058
1059 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
1060 continue;
1061 a = ira_curr_regno_allocno_map [REGNO (op)];
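/* If no alternative of this insn allowed the operand to be in
   memory, spilling the allocno would be costly, so mark it as a
   bad spill candidate.  */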
1062 if (! ALLOCNO_BAD_SPILL_P (a) && insn_allows_mem[i] == 0)
1063 ALLOCNO_BAD_SPILL_P (a) = true;
1064 }
1065
1066 }
1067
1068 \f
1069
1070 /* Wrapper around REGNO_OK_FOR_INDEX_P, to allow pseudo registers. */
1071 static inline bool
1072 ok_for_index_p_nonstrict (rtx reg)
1073 {
1074 unsigned regno = REGNO (reg);
1075
1076 return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno);
1077 }
1078
1079 /* A version of regno_ok_for_base_p for use here, when all
1080 pseudo-registers should count as OK. Arguments as for
1081 regno_ok_for_base_p. */
1082 static inline bool
1083 ok_for_base_p_nonstrict (rtx reg, machine_mode mode, addr_space_t as,
1084 enum rtx_code outer_code, enum rtx_code index_code)
1085 {
1086 unsigned regno = REGNO (reg);
1087
1088 if (regno >= FIRST_PSEUDO_REGISTER)
1089 return true;
1090 return ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
1091 }
1092
1093 /* Record the pseudo registers we must reload into hard registers in a
1094 subexpression of a memory address, X.
1095
1096 If CONTEXT is 0, we are looking at the base part of an address,
1097 otherwise we are looking at the index part.
1098
1099 MODE and AS are the mode and address space of the memory reference;
1100 OUTER_CODE and INDEX_CODE give the context that the rtx appears in.
1101 These four arguments are passed down to base_reg_class.
1102
1103 SCALE is twice the amount to multiply the cost by (it is twice so
1104 we can represent half-cost adjustments). */
1105 static void
1106 record_address_regs (machine_mode mode, addr_space_t as, rtx x,
1107 int context, enum rtx_code outer_code,
1108 enum rtx_code index_code, int scale)
1109 {
1110 enum rtx_code code = GET_CODE (x);
1111 enum reg_class rclass;
1112
1113 if (context == 1)
1114 rclass = INDEX_REG_CLASS;
1115 else
1116 rclass = base_reg_class (mode, as, outer_code, index_code);
1117
1118 switch (code)
1119 {
1120 case CONST_INT:
1121 case CONST:
1122 case CC0:
1123 case PC:
1124 case SYMBOL_REF:
1125 case LABEL_REF:
1126 return;
1127
1128 case PLUS:
1129 /* When we have an address that is a sum, we must determine
1130 whether registers are "base" or "index" regs. If there is a
1131 sum of two registers, we must choose one to be the "base".
1132 Luckily, we can use the REG_POINTER to make a good choice
1133 most of the time. We only need to do this on machines that
1134 can have two registers in an address and where the base and
1135 index register classes are different.
1136
1137 ??? This code used to set REGNO_POINTER_FLAG in some cases,
1138 but that seems bogus since it should only be set when we are
1139 sure the register is being used as a pointer. */
1140 {
1141 rtx arg0 = XEXP (x, 0);
1142 rtx arg1 = XEXP (x, 1);
1143 enum rtx_code code0 = GET_CODE (arg0);
1144 enum rtx_code code1 = GET_CODE (arg1);
1145
1146 /* Look inside subregs. */
1147 if (code0 == SUBREG)
1148 arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0);
1149 if (code1 == SUBREG)
1150 arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1);
1151
1152 /* If this machine only allows one register per address, it
1153 must be in the first operand. */
1154 if (MAX_REGS_PER_ADDRESS == 1)
1155 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
1156
1157 /* If index and base registers are the same on this machine,
1158 just record registers in any non-constant operands. We
1159 assume here, as well as in the tests below, that all
1160 addresses are in canonical form. */
1161 else if (INDEX_REG_CLASS
1162 == base_reg_class (VOIDmode, as, PLUS, SCRATCH))
1163 {
1164 record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1165 if (! CONSTANT_P (arg1))
1166 record_address_regs (mode, as, arg1, context, PLUS, code0, scale);
1167 }
1168
1169 /* If the second operand is a constant integer, it doesn't
1170 change what class the first operand must be. */
1171 else if (CONST_SCALAR_INT_P (arg1))
1172 record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1173 /* If the second operand is a symbolic constant, the first
1174 operand must be an index register. */
1175 else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF)
1176 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1177 /* If both operands are registers but one is already a hard
1178 register of index or reg-base class, give the other the
1179 class that the hard register is not. */
1180 else if (code0 == REG && code1 == REG
1181 && REGNO (arg0) < FIRST_PSEUDO_REGISTER
1182 && (ok_for_base_p_nonstrict (arg0, mode, as, PLUS, REG)
1183 || ok_for_index_p_nonstrict (arg0)))
1184 record_address_regs (mode, as, arg1,
1185 ok_for_base_p_nonstrict (arg0, mode, as,
1186 PLUS, REG) ? 1 : 0,
1187 PLUS, REG, scale);
1188 else if (code0 == REG && code1 == REG
1189 && REGNO (arg1) < FIRST_PSEUDO_REGISTER
1190 && (ok_for_base_p_nonstrict (arg1, mode, as, PLUS, REG)
1191 || ok_for_index_p_nonstrict (arg1)))
1192 record_address_regs (mode, as, arg0,
1193 ok_for_base_p_nonstrict (arg1, mode, as,
1194 PLUS, REG) ? 1 : 0,
1195 PLUS, REG, scale);
1196 /* If one operand is known to be a pointer, it must be the
1197 base with the other operand the index. Likewise if the
1198 other operand is a MULT. */
1199 else if ((code0 == REG && REG_POINTER (arg0)) || code1 == MULT)
1200 {
1201 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
1202 record_address_regs (mode, as, arg1, 1, PLUS, code0, scale);
1203 }
1204 else if ((code1 == REG && REG_POINTER (arg1)) || code0 == MULT)
1205 {
1206 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1207 record_address_regs (mode, as, arg1, 0, PLUS, code0, scale);
1208 }
1209 /* Otherwise, count equal chances that each might be a base or
1210 index register. This case should be rare. */
1211 else
1212 {
1213 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale / 2);
1214 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale / 2);
1215 record_address_regs (mode, as, arg1, 0, PLUS, code0, scale / 2);
1216 record_address_regs (mode, as, arg1, 1, PLUS, code0, scale / 2);
1217 }
1218 }
1219 break;
1220
1221 /* Double the importance of an allocno that is incremented or
1222 decremented, since it would take two extra insns if it ends
1223 up in the wrong place. */
1224 case POST_MODIFY:
1225 case PRE_MODIFY:
1226 record_address_regs (mode, as, XEXP (x, 0), 0, code,
1227 GET_CODE (XEXP (XEXP (x, 1), 1)), 2 * scale);
1228 if (REG_P (XEXP (XEXP (x, 1), 1)))
1229 record_address_regs (mode, as, XEXP (XEXP (x, 1), 1), 1, code, REG,
1230 2 * scale);
1231 break;
1232
1233 case POST_INC:
1234 case PRE_INC:
1235 case POST_DEC:
1236 case PRE_DEC:
1237 /* Double the importance of an allocno that is incremented or
1238 decremented, since it would take two extra insns if it ends
1239 up in the wrong place. */
1240 record_address_regs (mode, as, XEXP (x, 0), 0, code, SCRATCH, 2 * scale);
1241 break;
1242
1243 case REG:
1244 {
1245 struct costs *pp;
1246 int *pp_costs;
1247 enum reg_class i;
1248 int k, regno, add_cost;
1249 cost_classes_t cost_classes_ptr;
1250 enum reg_class *cost_classes;
1251 move_table *move_in_cost;
1252
1253 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
1254 break;
1255
1256 regno = REGNO (x);
1257 if (allocno_p)
1258 ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[regno]) = true;
1259 pp = COSTS (costs, COST_INDEX (regno));
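/* SCALE is twice the real weight (the initial callers pass
   frequency * 2), hence the division by 2 below; the addition
   saturates at INT_MAX instead of overflowing.  */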
1260 add_cost = (ira_memory_move_cost[Pmode][rclass][1] * scale) / 2;
1261 if (INT_MAX - add_cost < pp->mem_cost)
1262 pp->mem_cost = INT_MAX;
1263 else
1264 pp->mem_cost += add_cost;
1265 cost_classes_ptr = regno_cost_classes[regno];
1266 cost_classes = cost_classes_ptr->classes;
1267 pp_costs = pp->cost;
1268 ira_init_register_move_cost_if_necessary (Pmode);
1269 move_in_cost = ira_may_move_in_cost[Pmode];
1270 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1271 {
1272 i = cost_classes[k];
1273 add_cost = (move_in_cost[i][rclass] * scale) / 2;
1274 if (INT_MAX - add_cost < pp_costs[k])
1275 pp_costs[k] = INT_MAX;
1276 else
1277 pp_costs[k] += add_cost;
1278 }
1279 }
1280 break;
1281
1282 default:
1283 {
1284 const char *fmt = GET_RTX_FORMAT (code);
1285 int i;
1286 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1287 if (fmt[i] == 'e')
1288 record_address_regs (mode, as, XEXP (x, i), context, code, SCRATCH,
1289 scale);
1290 }
1291 }
1292 }
1293
1294 \f
1295
1296 /* Calculate the costs of insn operands. */
1297 static void
1298 record_operand_costs (rtx_insn *insn, enum reg_class *pref)
1299 {
1300 const char *constraints[MAX_RECOG_OPERANDS];
1301 machine_mode modes[MAX_RECOG_OPERANDS];
1302 rtx ops[MAX_RECOG_OPERANDS];
1303 rtx set;
1304 int i;
1305
1306 for (i = 0; i < recog_data.n_operands; i++)
1307 {
1308 constraints[i] = recog_data.constraints[i];
1309 modes[i] = recog_data.operand_mode[i];
1310 }
1311
1312 /* If we get here, we are set up to record the costs of all the
1313 operands for this insn. Start by initializing the costs. Then
1314 handle any address registers. Finally record the desired classes
1315 for any allocnos, doing it twice if some pair of operands are
1316 commutative. */
1317 for (i = 0; i < recog_data.n_operands; i++)
1318 {
1319 memcpy (op_costs[i], init_cost, struct_costs_size);
1320
1321 ops[i] = recog_data.operand[i];
1322 if (GET_CODE (recog_data.operand[i]) == SUBREG)
1323 recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]);
1324
1325 if (MEM_P (recog_data.operand[i]))
1326 record_address_regs (GET_MODE (recog_data.operand[i]),
1327 MEM_ADDR_SPACE (recog_data.operand[i]),
1328 XEXP (recog_data.operand[i], 0),
1329 0, MEM, SCRATCH, frequency * 2);
1330 else if (constraints[i][0] == 'p'
1331 || (insn_extra_address_constraint
1332 (lookup_constraint (constraints[i]))))
1333 record_address_regs (VOIDmode, ADDR_SPACE_GENERIC,
1334 recog_data.operand[i], 0, ADDRESS, SCRATCH,
1335 frequency * 2);
1336 }
1337
1338 /* Check for commutative operands in a separate loop so everything
1339 will have been initialized. We must do this even if one operand is a
1340 constant--see addsi3 in m68k.md. */
1341 for (i = 0; i < (int) recog_data.n_operands - 1; i++)
1342 if (constraints[i][0] == '%')
1343 {
1344 const char *xconstraints[MAX_RECOG_OPERANDS];
1345 int j;
1346
1347 /* Handle commutative operands by swapping the constraints.
1348 We assume the modes are the same. */
1349 for (j = 0; j < recog_data.n_operands; j++)
1350 xconstraints[j] = constraints[j];
1351
1352 xconstraints[i] = constraints[i+1];
1353 xconstraints[i+1] = constraints[i];
1354 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1355 recog_data.operand, modes,
1356 xconstraints, insn, pref);
1357 }
1358 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1359 recog_data.operand, modes,
1360 constraints, insn, pref);
1361
1362 /* If this insn is a single set copying operand 1 to operand 0 and
1363 one operand is an allocno with the other a hard reg or an allocno
1364 that prefers a hard register that is in its own register class
1365 then we may want to adjust the cost of that register class to -1.
1366
1367 Avoid the adjustment if the source does not die, to avoid
1368 stressing the register allocator by preferencing two colliding
1369 registers into a single class.
1370
1371 Also avoid the adjustment if a copy between hard registers of the
1372 class is expensive (ten times the cost of a default copy is
1373 considered arbitrarily expensive). This avoids losing when the
1374 preferred class is very expensive as the source of a copy
1375 instruction. */
1376 if ((set = single_set (insn)) != NULL_RTX
1377 /* In rare cases the single set insn might have fewer than 2
1378 operands, as the source can be a fixed special reg. */
1379 && recog_data.n_operands > 1
1380 && ops[0] == SET_DEST (set) && ops[1] == SET_SRC (set))
1381 {
1382 int regno, other_regno;
1383 rtx dest = SET_DEST (set);
1384 rtx src = SET_SRC (set);
1385
1386 dest = SET_DEST (set);
1387 src = SET_SRC (set);
1388 if (GET_CODE (dest) == SUBREG
1389 && (GET_MODE_SIZE (GET_MODE (dest))
1390 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))
1391 dest = SUBREG_REG (dest);
1392 if (GET_CODE (src) == SUBREG
1393 && (GET_MODE_SIZE (GET_MODE (src))
1394 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
1395 src = SUBREG_REG (src);
1396 if (REG_P (src) && REG_P (dest)
1397 && find_regno_note (insn, REG_DEAD, REGNO (src))
1398 && (((regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
1399 && (other_regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER)
1400 || ((regno = REGNO (dest)) >= FIRST_PSEUDO_REGISTER
1401 && (other_regno = REGNO (src)) < FIRST_PSEUDO_REGISTER)))
1402 {
1403 machine_mode mode = GET_MODE (src);
1404 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1405 enum reg_class *cost_classes = cost_classes_ptr->classes;
1406 reg_class_t rclass;
1407 int k, nr;
1408
1409 i = regno == (int) REGNO (src) ? 1 : 0;
1410 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1411 {
1412 rclass = cost_classes[k];
1413 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], other_regno)
1414 && (reg_class_size[(int) rclass]
1415 == ira_reg_class_max_nregs [(int) rclass][(int) mode]))
1416 {
1417 if (reg_class_size[rclass] == 1)
1418 op_costs[i]->cost[k] = -frequency;
1419 else
1420 {
1421 for (nr = 0;
1422 nr < hard_regno_nregs[other_regno][mode];
1423 nr++)
1424 if (! TEST_HARD_REG_BIT (reg_class_contents[rclass],
1425 other_regno + nr))
1426 break;
1427
1428 if (nr == hard_regno_nregs[other_regno][mode])
1429 op_costs[i]->cost[k] = -frequency;
1430 }
1431 }
1432 }
1433 }
1434 }
1435 }
1436
1437 \f
1438
1439 /* Process one insn INSN. Scan it and record each time it would save
1440 code to put certain allocnos in a certain class. Return the last
1441 insn processed, so that the scan can be continued from there. */
1442 static rtx_insn *
1443 scan_one_insn (rtx_insn *insn)
1444 {
1445 enum rtx_code pat_code;
1446 rtx set, note;
1447 int i, k;
1448 bool counted_mem;
1449
1450 if (!NONDEBUG_INSN_P (insn))
1451 return insn;
1452
1453 pat_code = GET_CODE (PATTERN (insn));
1454 if (pat_code == USE || pat_code == CLOBBER || pat_code == ASM_INPUT)
1455 return insn;
1456
1457 counted_mem = false;
1458 set = single_set (insn);
1459 extract_insn (insn);
1460
1461 /* If this insn loads a parameter from its stack slot, then it
1462 represents a savings, rather than a cost, if the parameter is
1463 stored in memory. Record this fact.
1464
1465 Similarly if we're loading other constants from memory (constant
1466 pool, TOC references, small data areas, etc) and this is the only
1467 assignment to the destination pseudo.
1468
1469 Don't do this if SET_SRC (set) isn't a general operand: if it is
1470 a memory requiring special instructions to load it, decreasing
1471 mem_cost might result in it being loaded using the specialized
1472 instruction into a register, then stored into the stack and loaded
1473 again from the stack. See PR52208.
1474 
1475 Don't do this if SET_SRC (set) has side effects. See PR56124. */
1476 if (set != 0 && REG_P (SET_DEST (set)) && MEM_P (SET_SRC (set))
1477 && (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL_RTX
1478 && ((MEM_P (XEXP (note, 0))
1479 && !side_effects_p (SET_SRC (set)))
1480 || (CONSTANT_P (XEXP (note, 0))
1481 && targetm.legitimate_constant_p (GET_MODE (SET_DEST (set)),
1482 XEXP (note, 0))
1483 && REG_N_SETS (REGNO (SET_DEST (set))) == 1))
1484 && general_operand (SET_SRC (set), GET_MODE (SET_SRC (set))))
1485 {
1486 enum reg_class cl = GENERAL_REGS;
1487 rtx reg = SET_DEST (set);
1488 int num = COST_INDEX (REGNO (reg));
1489
1490 COSTS (costs, num)->mem_cost
1491 -= ira_memory_move_cost[GET_MODE (reg)][cl][1] * frequency;
1492 record_address_regs (GET_MODE (SET_SRC (set)),
1493 MEM_ADDR_SPACE (SET_SRC (set)),
1494 XEXP (SET_SRC (set), 0), 0, MEM, SCRATCH,
1495 frequency * 2);
1496 counted_mem = true;
1497 }
1498
1499 record_operand_costs (insn, pref);
1500
1501 /* Now add the cost for each operand to the total costs for its
1502 allocno. */
1503 for (i = 0; i < recog_data.n_operands; i++)
1504 if (REG_P (recog_data.operand[i])
1505 && REGNO (recog_data.operand[i]) >= FIRST_PSEUDO_REGISTER)
1506 {
1507 int regno = REGNO (recog_data.operand[i]);
1508 struct costs *p = COSTS (costs, COST_INDEX (regno));
1509 struct costs *q = op_costs[i];
1510 int *p_costs = p->cost, *q_costs = q->cost;
1511 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1512 int add_cost;
1513
1514 /* If we already accounted for the memory "cost" above, don't
1515 do so again. */
1516 if (!counted_mem)
1517 {
1518 add_cost = q->mem_cost;
1519 if (add_cost > 0 && INT_MAX - add_cost < p->mem_cost)
1520 p->mem_cost = INT_MAX;
1521 else
1522 p->mem_cost += add_cost;
1523 }
1524 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1525 {
1526 add_cost = q_costs[k];
1527 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1528 p_costs[k] = INT_MAX;
1529 else
1530 p_costs[k] += add_cost;
1531 }
1532 }
1533
1534 return insn;
1535 }
1536
1537 \f
1538
1539 /* Print allocno costs to file F. */
1540 static void
1541 print_allocno_costs (FILE *f)
1542 {
1543 int k;
1544 ira_allocno_t a;
1545 ira_allocno_iterator ai;
1546
1547 ira_assert (allocno_p);
1548 fprintf (f, "\n");
1549 FOR_EACH_ALLOCNO (a, ai)
1550 {
1551 int i, rclass;
1552 basic_block bb;
1553 int regno = ALLOCNO_REGNO (a);
1554 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1555 enum reg_class *cost_classes = cost_classes_ptr->classes;
1556
1557 i = ALLOCNO_NUM (a);
1558 fprintf (f, " a%d(r%d,", i, regno);
1559 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1560 fprintf (f, "b%d", bb->index);
1561 else
1562 fprintf (f, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1563 fprintf (f, ") costs:");
1564 for (k = 0; k < cost_classes_ptr->num; k++)
1565 {
1566 rclass = cost_classes[k];
1567 fprintf (f, " %s:%d", reg_class_names[rclass],
1568 COSTS (costs, i)->cost[k]);
1569 if (flag_ira_region == IRA_REGION_ALL
1570 || flag_ira_region == IRA_REGION_MIXED)
1571 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->cost[k]);
1572 }
1573 fprintf (f, " MEM:%i", COSTS (costs, i)->mem_cost);
1574 if (flag_ira_region == IRA_REGION_ALL
1575 || flag_ira_region == IRA_REGION_MIXED)
1576 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->mem_cost);
1577 fprintf (f, "\n");
1578 }
1579 }
1580
1581 /* Print pseudo costs to file F. */
1582 static void
1583 print_pseudo_costs (FILE *f)
1584 {
1585 int regno, k;
1586 int rclass;
1587 cost_classes_t cost_classes_ptr;
1588 enum reg_class *cost_classes;
1589
1590 ira_assert (! allocno_p);
1591 fprintf (f, "\n");
1592 for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
1593 {
1594 if (REG_N_REFS (regno) <= 0)
1595 continue;
1596 cost_classes_ptr = regno_cost_classes[regno];
1597 cost_classes = cost_classes_ptr->classes;
1598 fprintf (f, " r%d costs:", regno);
1599 for (k = 0; k < cost_classes_ptr->num; k++)
1600 {
1601 rclass = cost_classes[k];
1602 fprintf (f, " %s:%d", reg_class_names[rclass],
1603 COSTS (costs, regno)->cost[k]);
1604 }
1605 fprintf (f, " MEM:%i\n", COSTS (costs, regno)->mem_cost);
1606 }
1607 }
1608
1609 /* Process the insns of basic block BB, updating the costs of the
1610 allocnos or pseudos they mention. */
1611 static void
1612 process_bb_for_costs (basic_block bb)
1613 {
1614 rtx_insn *insn;
1615
1616 frequency = REG_FREQ_FROM_BB (bb);
1617 if (frequency == 0)
1618 frequency = 1;
1619 FOR_BB_INSNS (bb, insn)
1620 insn = scan_one_insn (insn);
1621 }
1622
1623 /* Traverse the BB represented by LOOP_TREE_NODE to update the allocno
1624 costs. */
1625 static void
1626 process_bb_node_for_costs (ira_loop_tree_node_t loop_tree_node)
1627 {
1628 basic_block bb;
1629
1630 bb = loop_tree_node->bb;
1631 if (bb != NULL)
1632 process_bb_for_costs (bb);
1633 }
1634
1635 /* Find costs of register classes and memory for allocnos or pseudos
1636 and their best costs. Set up preferred, alternative and allocno
1637 classes for pseudos. */
1638 static void
1639 find_costs_and_classes (FILE *dump_file)
1640 {
1641 int i, k, start, max_cost_classes_num;
1642 int pass;
1643 basic_block bb;
1644 enum reg_class *regno_best_class;
1645
1646 init_recog ();
1647 regno_best_class
1648 = (enum reg_class *) ira_allocate (max_reg_num ()
1649 * sizeof (enum reg_class));
1650 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1651 regno_best_class[i] = NO_REGS;
1652 if (!resize_reg_info () && allocno_p
1653 && pseudo_classes_defined_p && flag_expensive_optimizations)
1654 {
1655 ira_allocno_t a;
1656 ira_allocno_iterator ai;
1657
1658 pref = pref_buffer;
1659 max_cost_classes_num = 1;
1660 FOR_EACH_ALLOCNO (a, ai)
1661 {
1662 pref[ALLOCNO_NUM (a)] = reg_preferred_class (ALLOCNO_REGNO (a));
1663 setup_regno_cost_classes_by_aclass
1664 (ALLOCNO_REGNO (a), pref[ALLOCNO_NUM (a)]);
1665 max_cost_classes_num
1666 = MAX (max_cost_classes_num,
1667 regno_cost_classes[ALLOCNO_REGNO (a)]->num);
1668 }
1669 start = 1;
1670 }
1671 else
1672 {
1673 pref = NULL;
1674 max_cost_classes_num = ira_important_classes_num;
1675 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1676 if (regno_reg_rtx[i] != NULL_RTX)
1677 setup_regno_cost_classes_by_mode (i, PSEUDO_REGNO_MODE (i));
1678 else
1679 setup_regno_cost_classes_by_aclass (i, ALL_REGS);
1680 start = 0;
1681 }
1682 if (allocno_p)
1683 /* Clear the flag for the next compiled function. */
1684 pseudo_classes_defined_p = false;
1685 /* Normally we scan the insns once and determine the best class to
1686 use for each allocno. However, if -fexpensive-optimizations is
1687 on, we do so twice, the second time using the tentative best
1688 classes to guide the selection. */
1689 for (pass = start; pass <= flag_expensive_optimizations; pass++)
1690 {
1691 if ((!allocno_p || internal_flag_ira_verbose > 0) && dump_file)
1692 fprintf (dump_file,
1693 "\nPass %i for finding pseudo/allocno costs\n\n", pass);
1694
1695 if (pass != start)
1696 {
1697 max_cost_classes_num = 1;
1698 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1699 {
1700 setup_regno_cost_classes_by_aclass (i, regno_best_class[i]);
1701 max_cost_classes_num
1702 = MAX (max_cost_classes_num, regno_cost_classes[i]->num);
1703 }
1704 }
1705
1706 struct_costs_size
1707 = sizeof (struct costs) + sizeof (int) * (max_cost_classes_num - 1);
1708 /* Zero out our accumulation of the cost of each class for each
1709 allocno. */
1710 memset (costs, 0, cost_elements_num * struct_costs_size);
1711
1712 if (allocno_p)
1713 {
1714 /* Scan the instructions and record each time it would save code
1715 to put a certain allocno in a certain class. */
1716 ira_traverse_loop_tree (true, ira_loop_tree_root,
1717 process_bb_node_for_costs, NULL);
1718
1719 memcpy (total_allocno_costs, costs,
1720 max_struct_costs_size * ira_allocnos_num);
1721 }
1722 else
1723 {
1724 basic_block bb;
1725
1726 FOR_EACH_BB_FN (bb, cfun)
1727 process_bb_for_costs (bb);
1728 }
1729
1730 if (pass == 0)
1731 pref = pref_buffer;
1732
1733 /* Now for each allocno look at how desirable each class is and
1734 find which class is preferred. */
1735 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1736 {
1737 ira_allocno_t a, parent_a;
1738 int rclass, a_num, parent_a_num, add_cost;
1739 ira_loop_tree_node_t parent;
1740 int best_cost, allocno_cost;
1741 enum reg_class best, alt_class;
1742 cost_classes_t cost_classes_ptr = regno_cost_classes[i];
1743 enum reg_class *cost_classes = cost_classes_ptr->classes;
1744 int *i_costs = temp_costs->cost;
1745 int i_mem_cost;
1746 int equiv_savings = regno_equiv_gains[i];
1747
1748 if (! allocno_p)
1749 {
1750 if (regno_reg_rtx[i] == NULL_RTX)
1751 continue;
1752 memcpy (temp_costs, COSTS (costs, i), struct_costs_size);
1753 i_mem_cost = temp_costs->mem_cost;
1754 }
1755 else
1756 {
1757 if (ira_regno_allocno_map[i] == NULL)
1758 continue;
1759 memset (temp_costs, 0, struct_costs_size);
1760 i_mem_cost = 0;
1761 /* Find cost of all allocnos with the same regno. */
1762 for (a = ira_regno_allocno_map[i];
1763 a != NULL;
1764 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1765 {
1766 int *a_costs, *p_costs;
1767
1768 a_num = ALLOCNO_NUM (a);
1769 if ((flag_ira_region == IRA_REGION_ALL
1770 || flag_ira_region == IRA_REGION_MIXED)
1771 && (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
1772 && (parent_a = parent->regno_allocno_map[i]) != NULL
1773 /* There are no caps yet. */
1774 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE
1775 (a)->border_allocnos,
1776 ALLOCNO_NUM (a)))
1777 {
1778 /* Propagate costs to upper levels in the region
1779 tree. */
1780 parent_a_num = ALLOCNO_NUM (parent_a);
1781 a_costs = COSTS (total_allocno_costs, a_num)->cost;
1782 p_costs = COSTS (total_allocno_costs, parent_a_num)->cost;
1783 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1784 {
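/* Add with saturation at INT_MAX to avoid signed overflow when
   costs from many allocnos accumulate.  */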
1785 add_cost = a_costs[k];
1786 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1787 p_costs[k] = INT_MAX;
1788 else
1789 p_costs[k] += add_cost;
1790 }
1791 add_cost = COSTS (total_allocno_costs, a_num)->mem_cost;
1792 if (add_cost > 0
1793 && (INT_MAX - add_cost
1794 < COSTS (total_allocno_costs,
1795 parent_a_num)->mem_cost))
1796 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1797 = INT_MAX;
1798 else
1799 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1800 += add_cost;
1801
1802 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1803 COSTS (total_allocno_costs, parent_a_num)->mem_cost = 0;
1804 }
1805 a_costs = COSTS (costs, a_num)->cost;
1806 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1807 {
1808 add_cost = a_costs[k];
1809 if (add_cost > 0 && INT_MAX - add_cost < i_costs[k])
1810 i_costs[k] = INT_MAX;
1811 else
1812 i_costs[k] += add_cost;
1813 }
1814 add_cost = COSTS (costs, a_num)->mem_cost;
1815 if (add_cost > 0 && INT_MAX - add_cost < i_mem_cost)
1816 i_mem_cost = INT_MAX;
1817 else
1818 i_mem_cost += add_cost;
1819 }
1820 }
1821 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1822 i_mem_cost = 0;
1823 else if (equiv_savings < 0)
1824 i_mem_cost = -equiv_savings;
1825 else if (equiv_savings > 0)
1826 {
1827 i_mem_cost = 0;
1828 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1829 i_costs[k] += equiv_savings;
1830 }
1831
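/* Start from a very large cost (INT_MAX / 2); any cheaper class
   becomes the current best.  */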
1832 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1833 best = ALL_REGS;
1834 alt_class = NO_REGS;
1835 /* Find best common class for all allocnos with the same
1836 regno. */
1837 for (k = 0; k < cost_classes_ptr->num; k++)
1838 {
1839 rclass = cost_classes[k];
1840 if (i_costs[k] < best_cost)
1841 {
1842 best_cost = i_costs[k];
1843 best = (enum reg_class) rclass;
1844 }
1845 else if (i_costs[k] == best_cost)
1846 best = ira_reg_class_subunion[best][rclass];
1847 if (pass == flag_expensive_optimizations
1848 /* We still prefer registers to memory even at this
1849 stage if their costs are the same. We will make
1850 a final decision during assigning hard registers
1851 when we have all info including more accurate
1852 costs which might be affected by assigning hard
1853 registers to other pseudos because the pseudos
1854 involved in moves can be coalesced. */
1855 && i_costs[k] <= i_mem_cost
1856 && (reg_class_size[reg_class_subunion[alt_class][rclass]]
1857 > reg_class_size[alt_class]))
1858 alt_class = reg_class_subunion[alt_class][rclass];
1859 }
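/* Translate the accumulated alternative class into an allocno class.  */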
1860 alt_class = ira_allocno_class_translate[alt_class];
1861 if (best_cost > i_mem_cost)
1862 regno_aclass[i] = NO_REGS;
1863 else if (!optimize && !targetm.class_likely_spilled_p (best))
1864 /* Registers in the alternative class are likely to need
1865 longer or slower sequences than registers in the best class.
1866 When optimizing we make some effort to use the best class
1867 over the alternative class where possible, but at -O0 we
1868 effectively give the alternative class equal weight.
1869 We then run the risk of using slower alternative registers
1870 when plenty of registers from the best class are still free.
1871 This is especially true because live ranges tend to be very
1872 short in -O0 code and so register pressure tends to be low.
1873
1874 Avoid that by ignoring the alternative class if the best
1875 class has plenty of registers. */
1876 regno_aclass[i] = best;
1877 else
1878 {
1879 /* Make the common class the biggest class of best and
1880 alt_class. */
1881 regno_aclass[i]
1882 = ira_reg_class_superunion[best][alt_class];
1883 ira_assert (regno_aclass[i] != NO_REGS
1884 && ira_reg_allocno_class_p[regno_aclass[i]]);
1885 }
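/* On the final pass, record the preferred, alternative and allocno
   classes for the pseudo and dump them if requested.  */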
1886 if (pass == flag_expensive_optimizations)
1887 {
1888 if (best_cost > i_mem_cost)
1889 best = alt_class = NO_REGS;
1890 else if (best == alt_class)
1891 alt_class = NO_REGS;
1892 setup_reg_classes (i, best, alt_class, regno_aclass[i]);
1893 if ((!allocno_p || internal_flag_ira_verbose > 2)
1894 && dump_file != NULL)
1895 fprintf (dump_file,
1896 " r%d: preferred %s, alternative %s, allocno %s\n",
1897 i, reg_class_names[best], reg_class_names[alt_class],
1898 reg_class_names[regno_aclass[i]]);
1899 }
1900 regno_best_class[i] = best;
1901 if (! allocno_p)
1902 {
1903 pref[i] = best_cost > i_mem_cost ? NO_REGS : best;
1904 continue;
1905 }
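/* Now refine the choice for each individual allocno of this regno:
   pick the cheapest class that is a subset of the common allocno
   class chosen above.  */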
1906 for (a = ira_regno_allocno_map[i];
1907 a != NULL;
1908 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1909 {
1910 enum reg_class aclass = regno_aclass[i];
1911 int a_num = ALLOCNO_NUM (a);
1912 int *total_a_costs = COSTS (total_allocno_costs, a_num)->cost;
1913 int *a_costs = COSTS (costs, a_num)->cost;
1914
1915 if (aclass == NO_REGS)
1916 best = NO_REGS;
1917 else
1918 {
1919 /* Find the best class that is a subset of the common
1920 class. */
1921 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1922 allocno_cost = best_cost;
1923 best = ALL_REGS;
1924 for (k = 0; k < cost_classes_ptr->num; k++)
1925 {
1926 rclass = cost_classes[k];
1927 if (! ira_class_subset_p[rclass][aclass])
1928 continue;
1929 if (total_a_costs[k] < best_cost)
1930 {
1931 best_cost = total_a_costs[k];
1932 allocno_cost = a_costs[k];
1933 best = (enum reg_class) rclass;
1934 }
1935 else if (total_a_costs[k] == best_cost)
1936 {
1937 best = ira_reg_class_subunion[best][rclass];
1938 allocno_cost = MAX (allocno_cost, a_costs[k]);
1939 }
1940 }
1941 ALLOCNO_CLASS_COST (a) = allocno_cost;
1942 }
1943 if (internal_flag_ira_verbose > 2 && dump_file != NULL
1944 && (pass == 0 || pref[a_num] != best))
1945 {
1946 fprintf (dump_file, " a%d (r%d,", a_num, i);
1947 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1948 fprintf (dump_file, "b%d", bb->index);
1949 else
1950 fprintf (dump_file, "l%d",
1951 ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1952 fprintf (dump_file, ") best %s, allocno %s\n",
1953 reg_class_names[best],
1954 reg_class_names[aclass]);
1955 }
1956 pref[a_num] = best;
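/* If a single value of this mode needs every hard register of the
   best class, the class pins the allocno to one place; record that as
   an explicit preference for the class's first hard register and copy
   the allocno class's cost to every cost class contained in it.  */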
1957 if (pass == flag_expensive_optimizations && best != aclass
1958 && ira_class_hard_regs_num[best] > 0
1959 && (ira_reg_class_max_nregs[best][ALLOCNO_MODE (a)]
1960 >= ira_class_hard_regs_num[best]))
1961 {
1962 int ind = cost_classes_ptr->index[aclass];
1963
1964 ira_assert (ind >= 0);
1965 ira_init_register_move_cost_if_necessary (ALLOCNO_MODE (a));
1966 ira_add_allocno_pref (a, ira_class_hard_regs[best][0],
1967 (a_costs[ind] - ALLOCNO_CLASS_COST (a))
1968 / (ira_register_move_cost
1969 [ALLOCNO_MODE (a)][best][aclass]));
1970 for (k = 0; k < cost_classes_ptr->num; k++)
1971 if (ira_class_subset_p[cost_classes[k]][best])
1972 a_costs[k] = a_costs[ind];
1973 }
1974 }
1975 }
1976
1977 if (internal_flag_ira_verbose > 4 && dump_file)
1978 {
1979 if (allocno_p)
1980 print_allocno_costs (dump_file);
1981 else
1982 print_pseudo_costs (dump_file);
1983 fprintf (dump_file, "\n");
1984 }
1985 }
1986 ira_free (regno_best_class);
1987 }
1988
1989 \f
1990
1991 /* Process moves involving hard regs to modify allocno hard register
1992 costs. We can do this only after the allocno classes have been
1993 determined. If a hard register forms a register class by itself, then
1994 moves with the hard register are already taken into account in the
1995 class costs for the allocno. */
1996 static void
1997 process_bb_node_for_hard_reg_moves (ira_loop_tree_node_t loop_tree_node)
1998 {
1999 int i, freq, src_regno, dst_regno, hard_regno, a_regno;
2000 bool to_p;
2001 ira_allocno_t a, curr_a;
2002 ira_loop_tree_node_t curr_loop_tree_node;
2003 enum reg_class rclass;
2004 basic_block bb;
2005 rtx_insn *insn;
2006 rtx set, src, dst;
2007
2008 bb = loop_tree_node->bb;
2009 if (bb == NULL)
2010 return;
2011 freq = REG_FREQ_FROM_BB (bb);
2012 if (freq == 0)
2013 freq = 1;
2014 FOR_BB_INSNS (bb, insn)
2015 {
2016 if (!NONDEBUG_INSN_P (insn))
2017 continue;
2018 set = single_set (insn);
2019 if (set == NULL_RTX)
2020 continue;
2021 dst = SET_DEST (set);
2022 src = SET_SRC (set);
2023 if (! REG_P (dst) || ! REG_P (src))
2024 continue;
2025 dst_regno = REGNO (dst);
2026 src_regno = REGNO (src);
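/* Keep only copies between a pseudo and a hard register; note the
   direction so the right move cost is used below.  */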
2027 if (dst_regno >= FIRST_PSEUDO_REGISTER
2028 && src_regno < FIRST_PSEUDO_REGISTER)
2029 {
2030 hard_regno = src_regno;
2031 a = ira_curr_regno_allocno_map[dst_regno];
2032 to_p = true;
2033 }
2034 else if (src_regno >= FIRST_PSEUDO_REGISTER
2035 && dst_regno < FIRST_PSEUDO_REGISTER)
2036 {
2037 hard_regno = dst_regno;
2038 a = ira_curr_regno_allocno_map[src_regno];
2039 to_p = false;
2040 }
2041 else
2042 continue;
2043 rclass = ALLOCNO_CLASS (a);
2044 if (! TEST_HARD_REG_BIT (reg_class_contents[rclass], hard_regno))
2045 continue;
2046 i = ira_class_hard_reg_index[rclass][hard_regno];
2047 if (i < 0)
2048 continue;
2049 a_regno = ALLOCNO_REGNO (a);
2050 for (curr_loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2051 curr_loop_tree_node != NULL;
2052 curr_loop_tree_node = curr_loop_tree_node->parent)
2053 if ((curr_a = curr_loop_tree_node->regno_allocno_map[a_regno]) != NULL)
2054 ira_add_allocno_pref (curr_a, hard_regno, freq);
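/* Make this hard register look cheaper for the allocno by the
   frequency-weighted move cost, and keep ALLOCNO_CLASS_COST
   consistent with the adjusted vector.  */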
2055 {
2056 int cost;
2057 enum reg_class hard_reg_class;
2058 machine_mode mode;
2059
2060 mode = ALLOCNO_MODE (a);
2061 hard_reg_class = REGNO_REG_CLASS (hard_regno);
2062 ira_init_register_move_cost_if_necessary (mode);
2063 cost = (to_p ? ira_register_move_cost[mode][hard_reg_class][rclass]
2064 : ira_register_move_cost[mode][rclass][hard_reg_class]) * freq;
2065 ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), rclass,
2066 ALLOCNO_CLASS_COST (a));
2067 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2068 rclass, 0);
2069 ALLOCNO_HARD_REG_COSTS (a)[i] -= cost;
2070 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[i] -= cost;
2071 ALLOCNO_CLASS_COST (a) = MIN (ALLOCNO_CLASS_COST (a),
2072 ALLOCNO_HARD_REG_COSTS (a)[i]);
2073 }
2074 }
2075 }
2076
2077 /* After we find the hard register and memory costs for allocnos, define
2078 the class of each allocno and modify its hard register costs for insns
2079 that move the allocno to or from a hard register. */
2080 static void
2081 setup_allocno_class_and_costs (void)
2082 {
2083 int i, j, n, regno, hard_regno, num;
2084 int *reg_costs;
2085 enum reg_class aclass, rclass;
2086 ira_allocno_t a;
2087 ira_allocno_iterator ai;
2088 cost_classes_t cost_classes_ptr;
2089
2090 ira_assert (allocno_p);
2091 FOR_EACH_ALLOCNO (a, ai)
2092 {
2093 i = ALLOCNO_NUM (a);
2094 regno = ALLOCNO_REGNO (a);
2095 aclass = regno_aclass[regno];
2096 cost_classes_ptr = regno_cost_classes[regno];
2097 ira_assert (pref[i] == NO_REGS || aclass != NO_REGS);
2098 ALLOCNO_MEMORY_COST (a) = COSTS (costs, i)->mem_cost;
2099 ira_set_allocno_class (a, aclass);
2100 if (aclass == NO_REGS)
2101 continue;
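/* When the preferred class differs from the allocno class, materialize
   a per-hard-register cost vector: registers inside the preferred class
   get the class cost, the rest get the cost recorded for their own
   class.  */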
2102 if (optimize && ALLOCNO_CLASS (a) != pref[i])
2103 {
2104 n = ira_class_hard_regs_num[aclass];
2105 ALLOCNO_HARD_REG_COSTS (a)
2106 = reg_costs = ira_allocate_cost_vector (aclass);
2107 for (j = n - 1; j >= 0; j--)
2108 {
2109 hard_regno = ira_class_hard_regs[aclass][j];
2110 if (TEST_HARD_REG_BIT (reg_class_contents[pref[i]], hard_regno))
2111 reg_costs[j] = ALLOCNO_CLASS_COST (a);
2112 else
2113 {
2114 rclass = REGNO_REG_CLASS (hard_regno);
2115 num = cost_classes_ptr->index[rclass];
2116 if (num < 0)
2117 {
2118 num = cost_classes_ptr->hard_regno_index[hard_regno];
2119 ira_assert (num >= 0);
2120 }
2121 reg_costs[j] = COSTS (costs, i)->cost[num];
2122 }
2123 }
2124 }
2125 }
2126 if (optimize)
2127 ira_traverse_loop_tree (true, ira_loop_tree_root,
2128 process_bb_node_for_hard_reg_moves, NULL);
2129 }
2130
2131 \f
2132
2133 /* Function called once per compiler run to initialize the cost vector pointers. */
2134 void
2135 ira_init_costs_once (void)
2136 {
2137 int i;
2138
2139 init_cost = NULL;
2140 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2141 {
2142 op_costs[i] = NULL;
2143 this_op_costs[i] = NULL;
2144 }
2145 temp_costs = NULL;
2146 }
2147
2148 /* Free allocated temporary cost vectors. */
2149 void
2150 target_ira_int::free_ira_costs ()
2151 {
2152 int i;
2153
2154 free (x_init_cost);
2155 x_init_cost = NULL;
2156 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2157 {
2158 free (x_op_costs[i]);
2159 free (x_this_op_costs[i]);
2160 x_op_costs[i] = x_this_op_costs[i] = NULL;
2161 }
2162 free (x_temp_costs);
2163 x_temp_costs = NULL;
2164 }
2165
2166 /* This is called each time the register-related information is
2167 changed. */
2168 void
2169 ira_init_costs (void)
2170 {
2171 int i;
2172
2173 this_target_ira_int->free_ira_costs ();
2174 max_struct_costs_size
2175 = sizeof (struct costs) + sizeof (int) * (ira_important_classes_num - 1);
2176 /* Don't use ira_allocate because vectors live through several IRA
2177 calls. */
2178 init_cost = (struct costs *) xmalloc (max_struct_costs_size);
2179 init_cost->mem_cost = 1000000;
2180 for (i = 0; i < ira_important_classes_num; i++)
2181 init_cost->cost[i] = 1000000;
2182 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2183 {
2184 op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2185 this_op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2186 }
2187 temp_costs = (struct costs *) xmalloc (max_struct_costs_size);
2188 }
2189
2190 \f
2191
2192 /* Common initialization function for ira_costs and
2193 ira_set_pseudo_classes. */
2194 static void
2195 init_costs (void)
2196 {
2197 init_subregs_of_mode ();
2198 costs = (struct costs *) ira_allocate (max_struct_costs_size
2199 * cost_elements_num);
2200 pref_buffer = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2201 * cost_elements_num);
2202 regno_aclass = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2203 * max_reg_num ());
2204 regno_equiv_gains = (int *) ira_allocate (sizeof (int) * max_reg_num ());
2205 memset (regno_equiv_gains, 0, sizeof (int) * max_reg_num ());
2206 }
2207
2208 /* Common finalization function for ira_costs and
2209 ira_set_pseudo_classes. */
2210 static void
2211 finish_costs (void)
2212 {
2213 finish_subregs_of_mode ();
2214 ira_free (regno_equiv_gains);
2215 ira_free (regno_aclass);
2216 ira_free (pref_buffer);
2217 ira_free (costs);
2218 }
2219
2220 /* Entry function which defines register class, memory and hard
2221 register costs for each allocno. */
2222 void
2223 ira_costs (void)
2224 {
2225 allocno_p = true;
2226 cost_elements_num = ira_allocnos_num;
2227 init_costs ();
2228 total_allocno_costs = (struct costs *) ira_allocate (max_struct_costs_size
2229 * ira_allocnos_num);
2230 initiate_regno_cost_classes ();
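/* calculate_elim_costs_all_insns records, through
   ira_adjust_equiv_reg_cost, the expected gain of replacing a pseudo
   by its equivalence; find_costs_and_classes folds those gains into
   the memory and class costs.  */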
2231 calculate_elim_costs_all_insns ();
2232 find_costs_and_classes (ira_dump_file);
2233 setup_allocno_class_and_costs ();
2234 finish_regno_cost_classes ();
2235 finish_costs ();
2236 ira_free (total_allocno_costs);
2237 }
2238
2239 /* Entry function which defines classes for pseudos.
2240 Set pseudo_classes_defined_p only if DEFINE_PSEUDO_CLASSES is true. */
2241 void
2242 ira_set_pseudo_classes (bool define_pseudo_classes, FILE *dump_file)
2243 {
2244 allocno_p = false;
2245 internal_flag_ira_verbose = flag_ira_verbose;
2246 cost_elements_num = max_reg_num ();
2247 init_costs ();
2248 initiate_regno_cost_classes ();
2249 find_costs_and_classes (dump_file);
2250 finish_regno_cost_classes ();
2251 if (define_pseudo_classes)
2252 pseudo_classes_defined_p = true;
2253
2254 finish_costs ();
2255 }
2256
2257 \f
2258
2259 /* Change hard register costs for allocnos that live across
2260 function calls. This is called only after all crossed calls have
2261 been found while building the allocno live ranges. */
2262 void
2263 ira_tune_allocno_costs (void)
2264 {
2265 int j, n, regno;
2266 int cost, min_cost, *reg_costs;
2267 enum reg_class aclass, rclass;
2268 machine_mode mode;
2269 ira_allocno_t a;
2270 ira_allocno_iterator ai;
2271 ira_allocno_object_iterator oi;
2272 ira_object_t obj;
2273 bool skip_p;
2274 HARD_REG_SET *crossed_calls_clobber_regs;
2275
2276 FOR_EACH_ALLOCNO (a, ai)
2277 {
2278 aclass = ALLOCNO_CLASS (a);
2279 if (aclass == NO_REGS)
2280 continue;
2281 mode = ALLOCNO_MODE (a);
2282 n = ira_class_hard_regs_num[aclass];
2283 min_cost = INT_MAX;
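/* For allocnos that cross calls which clobber a register, using that
   register implies saving and restoring it around each call; charge
   the call-frequency-weighted memory move cost for it.  */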
2284 if (ALLOCNO_CALLS_CROSSED_NUM (a)
2285 != ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
2286 {
2287 ira_allocate_and_set_costs
2288 (&ALLOCNO_HARD_REG_COSTS (a), aclass,
2289 ALLOCNO_CLASS_COST (a));
2290 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2291 for (j = n - 1; j >= 0; j--)
2292 {
2293 regno = ira_class_hard_regs[aclass][j];
2294 skip_p = false;
2295 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2296 {
2297 if (ira_hard_reg_set_intersection_p (regno, mode,
2298 OBJECT_CONFLICT_HARD_REGS
2299 (obj)))
2300 {
2301 skip_p = true;
2302 break;
2303 }
2304 }
2305 if (skip_p)
2306 continue;
2307 rclass = REGNO_REG_CLASS (regno);
2308 cost = 0;
2309 crossed_calls_clobber_regs
2310 = &(ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
2311 if (ira_hard_reg_set_intersection_p (regno, mode,
2312 *crossed_calls_clobber_regs)
2313 && (ira_hard_reg_set_intersection_p (regno, mode,
2314 call_used_reg_set)
2315 || HARD_REGNO_CALL_PART_CLOBBERED (regno, mode)))
2316 cost += (ALLOCNO_CALL_FREQ (a)
2317 * (ira_memory_move_cost[mode][rclass][0]
2318 + ira_memory_move_cost[mode][rclass][1]));
2319 #ifdef IRA_HARD_REGNO_ADD_COST_MULTIPLIER
2320 cost += ((ira_memory_move_cost[mode][rclass][0]
2321 + ira_memory_move_cost[mode][rclass][1])
2322 * ALLOCNO_FREQ (a)
2323 * IRA_HARD_REGNO_ADD_COST_MULTIPLIER (regno) / 2);
2324 #endif
2325 if (INT_MAX - cost < reg_costs[j])
2326 reg_costs[j] = INT_MAX;
2327 else
2328 reg_costs[j] += cost;
2329 if (min_cost > reg_costs[j])
2330 min_cost = reg_costs[j];
2331 }
2332 }
2333 if (min_cost != INT_MAX)
2334 ALLOCNO_CLASS_COST (a) = min_cost;
2335
2336 /* Some targets allow pseudos to be allocated to unaligned sequences
2337 of hard registers. However, selecting an unaligned sequence can
2338 unnecessarily restrict later allocations. So increase the cost of
2339 unaligned hard regs to encourage the use of aligned hard regs. */
2340 {
2341 const int nregs = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
2342
2343 if (nregs > 1)
2344 {
2345 ira_allocate_and_set_costs
2346 (&ALLOCNO_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a));
2347 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2348 for (j = n - 1; j >= 0; j--)
2349 {
2350 regno = ira_non_ordered_class_hard_regs[aclass][j];
2351 if ((regno % nregs) != 0)
2352 {
2353 int index = ira_class_hard_reg_index[aclass][regno];
2354 ira_assert (index != -1);
2355 reg_costs[index] += ALLOCNO_FREQ (a);
2356 }
2357 }
2358 }
2359 }
2360 }
2361 }
2362
2363 /* Add COST to the estimated gain for eliminating REGNO with its
2364 equivalence. If COST is zero, record that no such elimination is
2365 possible. */
2366
2367 void
2368 ira_adjust_equiv_reg_cost (unsigned regno, int cost)
2369 {
2370 if (cost == 0)
2371 regno_equiv_gains[regno] = 0;
2372 else
2373 regno_equiv_gains[regno] += cost;
2374 }
2375
2376 void
2377 ira_costs_c_finalize (void)
2378 {
2379 this_target_ira_int->free_ira_costs ();
2380 }