]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/ira-costs.c
Correct a function pre/postcondition [PR102403].
[thirdparty/gcc.git] / gcc / ira-costs.c
1 /* IRA hard register and memory cost calculation for allocnos or pseudos.
2 Copyright (C) 2006-2021 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "tm_p.h"
31 #include "insn-config.h"
32 #include "regs.h"
33 #include "ira.h"
34 #include "ira-int.h"
35 #include "addresses.h"
36 #include "reload.h"
37
/* Set to true each time pseudo register classes are (re)computed
   through function ira_set_pseudo_classes.  */
static bool pseudo_classes_defined_p = false;

/* TRUE if we work with allocnos.  Otherwise we work with pseudos.  */
static bool allocno_p;

/* Number of elements in array `costs'.  */
static int cost_elements_num;

/* The `costs' struct records the cost of using hard registers of each
   class considered for the calculation and of using memory for each
   allocno or pseudo.  */
struct costs
{
  int mem_cost;
  /* Costs for register classes start here.  We process only some
     allocno classes.  NOTE: this is a pre-C99 flexible array member;
     the structure is always allocated with enough trailing room for
     all the cost classes (see struct_costs_size below).  */
  int cost[1];
};

/* Shorthands for per-target cost-calculation state kept in
   this_target_ira_int.  */
#define max_struct_costs_size \
  (this_target_ira_int->x_max_struct_costs_size)
#define init_cost \
  (this_target_ira_int->x_init_cost)
#define temp_costs \
  (this_target_ira_int->x_temp_costs)
#define op_costs \
  (this_target_ira_int->x_op_costs)
#define this_op_costs \
  (this_target_ira_int->x_this_op_costs)

/* Costs of each class for each allocno or pseudo.  */
static struct costs *costs;

/* Accumulated costs of each class for each allocno.  */
static struct costs *total_allocno_costs;

/* It is the current size of struct costs.  */
static size_t struct_costs_size;

/* Return pointer to structure containing costs of allocno or pseudo
   with given NUM in array ARR.  The stride is struct_costs_size, not
   sizeof (struct costs), because of the variable-length tail.  */
#define COSTS(arr, num) \
  ((struct costs *) ((char *) (arr) + (num) * struct_costs_size))

/* Return index in COSTS when processing reg with REGNO.  */
#define COST_INDEX(regno) (allocno_p					     \
			   ? ALLOCNO_NUM (ira_curr_regno_allocno_map[regno]) \
			   : (int) regno)

/* Record register class preferences of each allocno or pseudo.  Null
   value means no preferences.  It happens on the 1st iteration of the
   cost calculation.  */
static enum reg_class *pref;

/* Allocated buffers for pref.  */
static enum reg_class *pref_buffer;

/* Record allocno class of each allocno with the same regno.  */
static enum reg_class *regno_aclass;

/* Record cost gains for not allocating a register with an invariant
   equivalence.  */
static int *regno_equiv_gains;

/* Execution frequency of the current insn.  */
static int frequency;
106
107 \f
108
/* Info about reg classes whose costs are calculated for a pseudo.  */
struct cost_classes
{
  /* Number of the cost classes in the subsequent array.  */
  int num;
  /* Container of the cost classes.  Only the first NUM entries are
     meaningful.  */
  enum reg_class classes[N_REG_CLASSES];
  /* Map reg class -> index of the reg class in the previous array.
     -1 if it is not a cost class.  */
  int index[N_REG_CLASSES];
  /* Map hard regno -> index of the first class in array CLASSES
     containing the hard regno, -1 otherwise.  */
  int hard_regno_index[FIRST_PSEUDO_REGISTER];
};

/* Types of pointers to the structure above.  */
typedef struct cost_classes *cost_classes_t;
typedef const struct cost_classes *const_cost_classes_t;

/* Info about cost classes for each pseudo, indexed by regno.  */
static cost_classes_t *regno_cost_classes;
130
/* Hash-table descriptor for interning unique cost_classes sets
   (see cost_classes_htab below).  */

struct cost_classes_hasher : pointer_hash <cost_classes>
{
  static inline hashval_t hash (const cost_classes *);
  static inline bool equal (const cost_classes *, const cost_classes *);
  static inline void remove (cost_classes *);
};
139
140 /* Returns hash value for cost classes info HV. */
141 inline hashval_t
142 cost_classes_hasher::hash (const cost_classes *hv)
143 {
144 return iterative_hash (&hv->classes, sizeof (enum reg_class) * hv->num, 0);
145 }
146
147 /* Compares cost classes info HV1 and HV2. */
148 inline bool
149 cost_classes_hasher::equal (const cost_classes *hv1, const cost_classes *hv2)
150 {
151 return (hv1->num == hv2->num
152 && memcmp (hv1->classes, hv2->classes,
153 sizeof (enum reg_class) * hv1->num) == 0);
154 }
155
/* Delete cost classes info V from the hash table, releasing its
   storage (it was allocated with ira_allocate in setup_cost_classes).  */
inline void
cost_classes_hasher::remove (cost_classes *v)
{
  ira_free (v);
}
162
/* Hash table interning unique cost classes sets, so identical sets
   are shared rather than duplicated.  */
static hash_table<cost_classes_hasher> *cost_classes_htab;

/* Cache: map allocno class -> cost classes for a pseudo of the given
   allocno class.  */
static cost_classes_t cost_classes_aclass_cache[N_REG_CLASSES];

/* Cache: map mode -> cost classes for a pseudo of the given mode.  */
static cost_classes_t cost_classes_mode_cache[MAX_MACHINE_MODE];

/* Cost classes that include all classes in ira_important_classes.  */
static cost_classes all_cost_classes;
175
176 /* Use the array of classes in CLASSES_PTR to fill out the rest of
177 the structure. */
178 static void
179 complete_cost_classes (cost_classes_t classes_ptr)
180 {
181 for (int i = 0; i < N_REG_CLASSES; i++)
182 classes_ptr->index[i] = -1;
183 for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
184 classes_ptr->hard_regno_index[i] = -1;
185 for (int i = 0; i < classes_ptr->num; i++)
186 {
187 enum reg_class cl = classes_ptr->classes[i];
188 classes_ptr->index[cl] = i;
189 for (int j = ira_class_hard_regs_num[cl] - 1; j >= 0; j--)
190 {
191 unsigned int hard_regno = ira_class_hard_regs[cl][j];
192 if (classes_ptr->hard_regno_index[hard_regno] < 0)
193 classes_ptr->hard_regno_index[hard_regno] = i;
194 }
195 }
196 }
197
/* Initialize info about the cost classes for each pseudo: allocate the
   per-regno map, clear both memoization caches, create the interning
   hash table, and build ALL_COST_CLASSES from the important classes.  */
static void
initiate_regno_cost_classes (void)
{
  int size = sizeof (cost_classes_t) * max_reg_num ();

  regno_cost_classes = (cost_classes_t *) ira_allocate (size);
  memset (regno_cost_classes, 0, size);
  memset (cost_classes_aclass_cache, 0,
	  sizeof (cost_classes_t) * N_REG_CLASSES);
  memset (cost_classes_mode_cache, 0,
	  sizeof (cost_classes_t) * MAX_MACHINE_MODE);
  cost_classes_htab = new hash_table<cost_classes_hasher> (200);
  all_cost_classes.num = ira_important_classes_num;
  for (int i = 0; i < ira_important_classes_num; i++)
    all_cost_classes.classes[i] = ira_important_classes[i];
  complete_cost_classes (&all_cost_classes);
}
216
217 /* Create new cost classes from cost classes FROM and set up members
218 index and hard_regno_index. Return the new classes. The function
219 implements some common code of two functions
220 setup_regno_cost_classes_by_aclass and
221 setup_regno_cost_classes_by_mode. */
222 static cost_classes_t
223 setup_cost_classes (cost_classes_t from)
224 {
225 cost_classes_t classes_ptr;
226
227 classes_ptr = (cost_classes_t) ira_allocate (sizeof (struct cost_classes));
228 classes_ptr->num = from->num;
229 for (int i = 0; i < from->num; i++)
230 classes_ptr->classes[i] = from->classes[i];
231 complete_cost_classes (classes_ptr);
232 return classes_ptr;
233 }
234
/* Return a version of FULL that only considers registers in REGS that are
   valid for mode MODE.  Both FULL and the returned class are globally
   allocated.  */
static cost_classes_t
restrict_cost_classes (cost_classes_t full, machine_mode mode,
		       const_hard_reg_set regs)
{
  /* Scratch buffer for building the restricted set.  Being static it
     makes this function non-reentrant, which is fine for IRA's
     single-threaded use.  */
  static struct cost_classes narrow;
  /* map[i] is the index in NARROW of FULL->classes[i], or -1 if that
     class was dropped.  Used below to translate FULL's index[] map.  */
  int map[N_REG_CLASSES];
  narrow.num = 0;
  for (int i = 0; i < full->num; i++)
    {
      /* Assume that we'll drop the class.  */
      map[i] = -1;

      /* Ignore classes that are too small for the mode.  */
      enum reg_class cl = full->classes[i];
      if (!contains_reg_of_mode[cl][mode])
	continue;

      /* Calculate the set of registers in CL that belong to REGS and
	 are valid for MODE.  */
      HARD_REG_SET valid_for_cl = reg_class_contents[cl] & regs;
      valid_for_cl &= ~(ira_prohibited_class_mode_regs[cl][mode]
			| ira_no_alloc_regs);
      if (hard_reg_set_empty_p (valid_for_cl))
	continue;

      /* Don't use this class if the set of valid registers is a subset
	 of an existing class.  For example, suppose we have two classes
	 GR_REGS and FR_REGS and a union class GR_AND_FR_REGS.  Suppose
	 that the mode changes allowed by FR_REGS are not as general as
	 the mode changes allowed by GR_REGS.

	 In this situation, the mode changes for GR_AND_FR_REGS could
	 either be seen as the union or the intersection of the mode
	 changes allowed by the two subclasses.  The justification for
	 the union-based definition would be that, if you want a mode
	 change that's only allowed by GR_REGS, you can pick a register
	 from the GR_REGS subclass.  The justification for the
	 intersection-based definition would be that every register
	 from the class would allow the mode change.

	 However, if we have a register that needs to be in GR_REGS,
	 using GR_AND_FR_REGS with the intersection-based definition
	 would be too pessimistic, since it would bring in restrictions
	 that only apply to FR_REGS.  Conversely, if we have a register
	 that needs to be in FR_REGS, using GR_AND_FR_REGS with the
	 union-based definition would lose the extra restrictions
	 placed on FR_REGS.  GR_AND_FR_REGS is therefore only useful
	 for cases where GR_REGS and FP_REGS are both valid.  */
      int pos;
      for (pos = 0; pos < narrow.num; ++pos)
	{
	  enum reg_class cl2 = narrow.classes[pos];
	  if (hard_reg_set_subset_p (valid_for_cl, reg_class_contents[cl2]))
	    break;
	}
      /* Note that POS may equal narrow.num here, in which case CL (or
	 its representative) becomes a new member below.  */
      map[i] = pos;
      if (pos == narrow.num)
	{
	  /* If several classes are equivalent, prefer to use the one
	     that was chosen as the allocno class.  */
	  enum reg_class cl2 = ira_allocno_class_translate[cl];
	  if (ira_class_hard_regs_num[cl] == ira_class_hard_regs_num[cl2])
	    cl = cl2;
	  narrow.classes[narrow.num++] = cl;
	}
    }
  /* Nothing was dropped or merged: reuse FULL itself.  */
  if (narrow.num == full->num)
    return full;

  /* Intern the restricted set so identical results are shared.  */
  cost_classes **slot = cost_classes_htab->find_slot (&narrow, INSERT);
  if (*slot == NULL)
    {
      cost_classes_t classes = setup_cost_classes (&narrow);
      /* Map equivalent classes to the representative that we chose above.  */
      for (int i = 0; i < ira_important_classes_num; i++)
	{
	  enum reg_class cl = ira_important_classes[i];
	  int index = full->index[cl];
	  if (index >= 0)
	    classes->index[cl] = map[index];
	}
      *slot = classes;
    }
  return *slot;
}
323
/* Setup cost classes for pseudo REGNO whose allocno class is ACLASS.
   This function is used when we know an initial approximation of
   allocno class of the pseudo already, e.g. on the second iteration
   of class cost calculation or after class cost calculation in
   register-pressure sensitive insn scheduling or register-pressure
   sensitive loop-invariant motion.  */
static void
setup_regno_cost_classes_by_aclass (int regno, enum reg_class aclass)
{
  /* Scratch buffer shared across calls; not reentrant, which is fine
     for IRA's single-threaded use.  */
  static struct cost_classes classes;
  cost_classes_t classes_ptr;
  enum reg_class cl;
  int i;
  cost_classes **slot;
  HARD_REG_SET temp, temp2;
  bool exclude_p;

  /* Compute (and memoize per allocno class) the base cost-classes set.  */
  if ((classes_ptr = cost_classes_aclass_cache[aclass]) == NULL)
    {
      temp = reg_class_contents[aclass] & ~ira_no_alloc_regs;
      /* We exclude classes from consideration which are subsets of
	 ACLASS only if ACLASS is an uniform class.  */
      exclude_p = ira_uniform_class_p[aclass];
      classes.num = 0;
      for (i = 0; i < ira_important_classes_num; i++)
	{
	  cl = ira_important_classes[i];
	  if (exclude_p)
	    {
	      /* Exclude non-uniform classes which are subsets of
		 ACLASS.  */
	      temp2 = reg_class_contents[cl] & ~ira_no_alloc_regs;
	      if (hard_reg_set_subset_p (temp2, temp) && cl != aclass)
		continue;
	    }
	  classes.classes[classes.num++] = cl;
	}
      /* Intern the set so identical sets are shared.  */
      slot = cost_classes_htab->find_slot (&classes, INSERT);
      if (*slot == NULL)
	{
	  classes_ptr = setup_cost_classes (&classes);
	  *slot = classes_ptr;
	}
      classes_ptr = cost_classes_aclass_cache[aclass] = (cost_classes_t) *slot;
    }
  if (regno_reg_rtx[regno] != NULL_RTX)
    {
      /* Restrict the classes to those that are valid for REGNO's mode
	 (which might for example exclude singleton classes if the mode
	 requires two registers).  Also restrict the classes to those that
	 are valid for subregs of REGNO.  */
      const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno);
      if (!valid_regs)
	valid_regs = &reg_class_contents[ALL_REGS];
      classes_ptr = restrict_cost_classes (classes_ptr,
					   PSEUDO_REGNO_MODE (regno),
					   *valid_regs);
    }
  regno_cost_classes[regno] = classes_ptr;
}
384
385 /* Setup cost classes for pseudo REGNO with MODE. Usage of MODE can
386 decrease number of cost classes for the pseudo, if hard registers
387 of some important classes cannot hold a value of MODE. So the
388 pseudo cannot get hard register of some important classes and cost
389 calculation for such important classes is only wasting CPU
390 time. */
391 static void
392 setup_regno_cost_classes_by_mode (int regno, machine_mode mode)
393 {
394 if (const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno))
395 regno_cost_classes[regno] = restrict_cost_classes (&all_cost_classes,
396 mode, *valid_regs);
397 else
398 {
399 if (cost_classes_mode_cache[mode] == NULL)
400 cost_classes_mode_cache[mode]
401 = restrict_cost_classes (&all_cost_classes, mode,
402 reg_class_contents[ALL_REGS]);
403 regno_cost_classes[regno] = cost_classes_mode_cache[mode];
404 }
405 }
406
/* Finalize info about the cost classes for each pseudo: free the
   per-regno map and destroy the interning hash table (which releases
   the interned cost_classes via cost_classes_hasher::remove).  */
static void
finish_regno_cost_classes (void)
{
  ira_free (regno_cost_classes);
  delete cost_classes_htab;
  cost_classes_htab = NULL;
}
415
416 \f
417
418 /* Compute the cost of loading X into (if TO_P is TRUE) or from (if
419 TO_P is FALSE) a register of class RCLASS in mode MODE. X must not
420 be a pseudo register. */
421 static int
422 copy_cost (rtx x, machine_mode mode, reg_class_t rclass, bool to_p,
423 secondary_reload_info *prev_sri)
424 {
425 secondary_reload_info sri;
426 reg_class_t secondary_class = NO_REGS;
427
428 /* If X is a SCRATCH, there is actually nothing to move since we are
429 assuming optimal allocation. */
430 if (GET_CODE (x) == SCRATCH)
431 return 0;
432
433 /* Get the class we will actually use for a reload. */
434 rclass = targetm.preferred_reload_class (x, rclass);
435
436 /* If we need a secondary reload for an intermediate, the cost is
437 that to load the input into the intermediate register, then to
438 copy it. */
439 sri.prev_sri = prev_sri;
440 sri.extra_cost = 0;
441 /* PR 68770: Secondary reload might examine the t_icode field. */
442 sri.t_icode = CODE_FOR_nothing;
443
444 secondary_class = targetm.secondary_reload (to_p, x, rclass, mode, &sri);
445
446 if (secondary_class != NO_REGS)
447 {
448 ira_init_register_move_cost_if_necessary (mode);
449 return (ira_register_move_cost[mode][(int) secondary_class][(int) rclass]
450 + sri.extra_cost
451 + copy_cost (x, mode, secondary_class, to_p, &sri));
452 }
453
454 /* For memory, use the memory move cost, for (hard) registers, use
455 the cost to move between the register classes, and use 2 for
456 everything else (constants). */
457 if (MEM_P (x) || rclass == NO_REGS)
458 return sri.extra_cost
459 + ira_memory_move_cost[mode][(int) rclass][to_p != 0];
460 else if (REG_P (x))
461 {
462 reg_class_t x_class = REGNO_REG_CLASS (REGNO (x));
463
464 ira_init_register_move_cost_if_necessary (mode);
465 return (sri.extra_cost
466 + ira_register_move_cost[mode][(int) x_class][(int) rclass]);
467 }
468 else
469 /* If this is a constant, we may eventually want to call rtx_cost
470 here. */
471 return sri.extra_cost + COSTS_N_INSNS (1);
472 }
473
474 \f
475
/* Record the cost of using memory or hard registers of various
   classes for the operands in INSN.

   N_ALTS is the number of alternatives.
   N_OPS is the number of operands.
   OPS is an array of the operands.
   MODES are the modes of the operands, in case any are VOIDmode.
   CONSTRAINTS are the constraints to use for the operands.  This array
   is modified by this procedure.

   This procedure works alternative by alternative.  For each
   alternative we assume that we will be able to allocate all allocnos
   to their ideal register class and calculate the cost of using that
   alternative.  Then we compute, for each operand that is a
   pseudo-register, the cost of having the allocno allocated to each
   register class and using it in that alternative.  To this cost is
   added the cost of the alternative.

   The cost of each class for this insn is its lowest cost among all
   the alternatives.  On exit, op_costs[i] holds that minimum for each
   pseudo operand I, and allocnos appearing only in alternatives that
   disallow memory are marked as bad spill candidates.  */
static void
record_reg_classes (int n_alts, int n_ops, rtx *ops,
		    machine_mode *modes, const char **constraints,
		    rtx_insn *insn, enum reg_class *pref)
{
  int alt;
  int i, j, k;
  int insn_allows_mem[MAX_RECOG_OPERANDS];
  move_table *move_in_cost, *move_out_cost;
  short (*mem_cost)[2];

  for (i = 0; i < n_ops; i++)
    insn_allows_mem[i] = 0;

  /* Process each alternative, each time minimizing an operand's cost
     with the cost for each operand in that alternative.  */
  alternative_mask preferred = get_preferred_alternatives (insn);
  for (alt = 0; alt < n_alts; alt++)
    {
      enum reg_class classes[MAX_RECOG_OPERANDS];
      int allows_mem[MAX_RECOG_OPERANDS];
      enum reg_class rclass;
      int alt_fail = 0;
      int alt_cost = 0, op_cost_add;

      /* Disparaged alternatives contribute nothing; just advance every
	 operand's constraint pointer past this alternative.  */
      if (!TEST_BIT (preferred, alt))
	{
	  for (i = 0; i < recog_data.n_operands; i++)
	    constraints[i] = skip_alternative (constraints[i]);

	  continue;
	}

      for (i = 0; i < n_ops; i++)
	{
	  unsigned char c;
	  const char *p = constraints[i];
	  rtx op = ops[i];
	  machine_mode mode = modes[i];
	  int allows_addr = 0;
	  int win = 0;

	  /* Initially show we know nothing about the register class.  */
	  classes[i] = NO_REGS;
	  allows_mem[i] = 0;

	  /* If this operand has no constraints at all, we can
	     conclude nothing about it since anything is valid.  */
	  if (*p == 0)
	    {
	      if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
		memset (this_op_costs[i], 0, struct_costs_size);
	      continue;
	    }

	  /* If this alternative is only relevant when this operand
	     matches a previous operand, we do different things
	     depending on whether this operand is a allocno-reg or not.
	     We must process any modifiers for the operand before we
	     can make this test.  */
	  while (*p == '%' || *p == '=' || *p == '+' || *p == '&')
	    p++;

	  /* Single-digit matching constraint referring to an earlier
	     operand J < I.  */
	  if (p[0] >= '0' && p[0] <= '0' + i)
	    {
	      /* Copy class and whether memory is allowed from the
		 matching alternative.  Then perform any needed cost
		 computations and/or adjustments.  */
	      j = p[0] - '0';
	      classes[i] = classes[j];
	      allows_mem[i] = allows_mem[j];
	      if (allows_mem[i])
		insn_allows_mem[i] = 1;

	      if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
		{
		  /* If this matches the other operand, we have no
		     added cost and we win.  */
		  if (rtx_equal_p (ops[j], op))
		    win = 1;
		  /* If we can put the other operand into a register,
		     add to the cost of this alternative the cost to
		     copy this operand to the register used for the
		     other operand.  */
		  else if (classes[j] != NO_REGS)
		    {
		      alt_cost += copy_cost (op, mode, classes[j], 1, NULL);
		      win = 1;
		    }
		}
	      else if (! REG_P (ops[j])
		       || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER)
		{
		  /* This op is an allocno but the one it matches is
		     not.  */

		  /* If we can't put the other operand into a
		     register, this alternative can't be used.  */

		  if (classes[j] == NO_REGS)
		    alt_fail = 1;
		  /* Otherwise, add to the cost of this alternative
		     the cost to copy the other operand to the hard
		     register used for this operand.  */
		  else
		    alt_cost += copy_cost (ops[j], mode, classes[j], 1, NULL);
		}
	      else
		{
		  /* The costs of this operand are not the same as the
		     other operand since move costs are not symmetric.
		     Moreover, if we cannot tie them, this alternative
		     needs to do a copy, which is one insn.  */
		  struct costs *pp = this_op_costs[i];
		  int *pp_costs = pp->cost;
		  cost_classes_t cost_classes_ptr
		    = regno_cost_classes[REGNO (op)];
		  enum reg_class *cost_classes = cost_classes_ptr->classes;
		  bool in_p = recog_data.operand_type[i] != OP_OUT;
		  bool out_p = recog_data.operand_type[i] != OP_IN;
		  enum reg_class op_class = classes[i];

		  ira_init_register_move_cost_if_necessary (mode);
		  /* Three-way split on direction: output-only,
		     input-only, or in-out.  Each case fills pp_costs[k]
		     for every cost class K of the pseudo.  */
		  if (! in_p)
		    {
		      ira_assert (out_p);
		      if (op_class == NO_REGS)
			{
			  mem_cost = ira_memory_move_cost[mode];
			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
			    {
			      rclass = cost_classes[k];
			      pp_costs[k] = mem_cost[rclass][0] * frequency;
			    }
			}
		      else
			{
			  move_out_cost = ira_may_move_out_cost[mode];
			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
			    {
			      rclass = cost_classes[k];
			      pp_costs[k]
				= move_out_cost[op_class][rclass] * frequency;
			    }
			}
		    }
		  else if (! out_p)
		    {
		      ira_assert (in_p);
		      if (op_class == NO_REGS)
			{
			  mem_cost = ira_memory_move_cost[mode];
			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
			    {
			      rclass = cost_classes[k];
			      pp_costs[k] = mem_cost[rclass][1] * frequency;
			    }
			}
		      else
			{
			  move_in_cost = ira_may_move_in_cost[mode];
			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
			    {
			      rclass = cost_classes[k];
			      pp_costs[k]
				= move_in_cost[rclass][op_class] * frequency;
			    }
			}
		    }
		  else
		    {
		      if (op_class == NO_REGS)
			{
			  mem_cost = ira_memory_move_cost[mode];
			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
			    {
			      rclass = cost_classes[k];
			      pp_costs[k] = ((mem_cost[rclass][0]
					      + mem_cost[rclass][1])
					     * frequency);
			    }
			}
		      else
			{
			  move_in_cost = ira_may_move_in_cost[mode];
			  move_out_cost = ira_may_move_out_cost[mode];
			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
			    {
			      rclass = cost_classes[k];
			      pp_costs[k] = ((move_in_cost[rclass][op_class]
					      + move_out_cost[op_class][rclass])
					     * frequency);
			    }
			}
		    }

		  /* If the alternative actually allows memory, make
		     things a bit cheaper since we won't need an extra
		     insn to load it.  */
		  pp->mem_cost
		    = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
		       + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
		       - allows_mem[i]) * frequency;

		  /* If we have assigned a class to this allocno in
		     our first pass, add a cost to this alternative
		     corresponding to what we would add if this
		     allocno were not in the appropriate class.  */
		  if (pref)
		    {
		      enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];

		      if (pref_class == NO_REGS)
			alt_cost
			  += ((out_p
			       ? ira_memory_move_cost[mode][op_class][0] : 0)
			      + (in_p
				 ? ira_memory_move_cost[mode][op_class][1]
				 : 0));
		      else if (ira_reg_class_intersect
			       [pref_class][op_class] == NO_REGS)
			alt_cost
			  += ira_register_move_cost[mode][pref_class][op_class];
		    }
		  /* Tying is impossible when the two pseudos differ and
		     the matched one stays live: count the copy insn.  */
		  if (REGNO (ops[i]) != REGNO (ops[j])
		      && ! find_reg_note (insn, REG_DEAD, op))
		    alt_cost += 2;

		  /* Step past the matching-constraint digit.  */
		  p++;
		}
	    }

	  /* Scan all the constraint letters.  See if the operand
	     matches any of the constraints.  Collect the valid
	     register classes and see if this operand accepts
	     memory.  */
	  while ((c = *p))
	    {
	      switch (c)
		{
		case '*':
		  /* Ignore the next letter for this pass.  */
		  c = *++p;
		  break;

		case '^':
		  alt_cost += 2;
		  break;

		case '?':
		  alt_cost += 2;
		  break;

		case 'g':
		  if (MEM_P (op)
		      || (CONSTANT_P (op)
			  && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))))
		    win = 1;
		  insn_allows_mem[i] = allows_mem[i] = 1;
		  classes[i] = ira_reg_class_subunion[classes[i]][GENERAL_REGS];
		  break;

		default:
		  enum constraint_num cn = lookup_constraint (p);
		  enum reg_class cl;
		  switch (get_constraint_type (cn))
		    {
		    case CT_REGISTER:
		      cl = reg_class_for_constraint (cn);
		      if (cl != NO_REGS)
			classes[i] = ira_reg_class_subunion[classes[i]][cl];
		      break;

		    case CT_CONST_INT:
		      if (CONST_INT_P (op)
			  && insn_const_int_ok_for_constraint (INTVAL (op), cn))
			win = 1;
		      break;

		    case CT_MEMORY:
		    case CT_RELAXED_MEMORY:
		      /* Every MEM can be reloaded to fit.  */
		      insn_allows_mem[i] = allows_mem[i] = 1;
		      if (MEM_P (op))
			win = 1;
		      break;

		    case CT_SPECIAL_MEMORY:
		      insn_allows_mem[i] = allows_mem[i] = 1;
		      if (MEM_P (extract_mem_from_operand (op))
			  && constraint_satisfied_p (op, cn))
			win = 1;
		      break;

		    case CT_ADDRESS:
		      /* Every address can be reloaded to fit.  */
		      allows_addr = 1;
		      if (address_operand (op, GET_MODE (op))
			  || constraint_satisfied_p (op, cn))
			win = 1;
		      /* We know this operand is an address, so we
			 want it to be allocated to a hard register
			 that can be the base of an address,
			 i.e. BASE_REG_CLASS.  */
		      classes[i]
			= ira_reg_class_subunion[classes[i]]
			  [base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
					   ADDRESS, SCRATCH)];
		      break;

		    case CT_FIXED_FORM:
		      if (constraint_satisfied_p (op, cn))
			win = 1;
		      break;
		    }
		  break;
		}
	      p += CONSTRAINT_LEN (c, p);
	      if (c == ',')
		break;
	    }

	  constraints[i] = p;

	  if (alt_fail)
	    break;

	  /* How we account for this operand now depends on whether it
	     is a pseudo register or not.  If it is, we first check if
	     any register classes are valid.  If not, we ignore this
	     alternative, since we want to assume that all allocnos get
	     allocated for register preferencing.  If some register
	     class is valid, compute the costs of moving the allocno
	     into that class.  */
	  if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
	    {
	      if (classes[i] == NO_REGS && ! allows_mem[i])
		{
		  /* We must always fail if the operand is a REG, but
		     we did not find a suitable class and memory is
		     not allowed.

		     Otherwise we may perform an uninitialized read
		     from this_op_costs after the `continue' statement
		     below.  */
		  alt_fail = 1;
		}
	      else
		{
		  unsigned int regno = REGNO (op);
		  struct costs *pp = this_op_costs[i];
		  int *pp_costs = pp->cost;
		  cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
		  enum reg_class *cost_classes = cost_classes_ptr->classes;
		  bool in_p = recog_data.operand_type[i] != OP_OUT;
		  bool out_p = recog_data.operand_type[i] != OP_IN;
		  enum reg_class op_class = classes[i];

		  ira_init_register_move_cost_if_necessary (mode);
		  /* Same three-way direction split as in the
		     matched-operand case above.  */
		  if (! in_p)
		    {
		      ira_assert (out_p);
		      if (op_class == NO_REGS)
			{
			  mem_cost = ira_memory_move_cost[mode];
			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
			    {
			      rclass = cost_classes[k];
			      pp_costs[k] = mem_cost[rclass][0] * frequency;
			    }
			}
		      else
			{
			  move_out_cost = ira_may_move_out_cost[mode];
			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
			    {
			      rclass = cost_classes[k];
			      pp_costs[k]
				= move_out_cost[op_class][rclass] * frequency;
			    }
			}
		    }
		  else if (! out_p)
		    {
		      ira_assert (in_p);
		      if (op_class == NO_REGS)
			{
			  mem_cost = ira_memory_move_cost[mode];
			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
			    {
			      rclass = cost_classes[k];
			      pp_costs[k] = mem_cost[rclass][1] * frequency;
			    }
			}
		      else
			{
			  move_in_cost = ira_may_move_in_cost[mode];
			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
			    {
			      rclass = cost_classes[k];
			      pp_costs[k]
				= move_in_cost[rclass][op_class] * frequency;
			    }
			}
		    }
		  else
		    {
		      if (op_class == NO_REGS)
			{
			  mem_cost = ira_memory_move_cost[mode];
			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
			    {
			      rclass = cost_classes[k];
			      pp_costs[k] = ((mem_cost[rclass][0]
					      + mem_cost[rclass][1])
					     * frequency);
			    }
			}
		      else
			{
			  move_in_cost = ira_may_move_in_cost[mode];
			  move_out_cost = ira_may_move_out_cost[mode];
			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
			    {
			      rclass = cost_classes[k];
			      pp_costs[k] = ((move_in_cost[rclass][op_class]
					      + move_out_cost[op_class][rclass])
					     * frequency);
			    }
			}
		    }

		  if (op_class == NO_REGS)
		    /* Although we don't need insn to reload from
		       memory, still accessing memory is usually more
		       expensive than a register.  */
		    pp->mem_cost = frequency;
		  else
		    /* If the alternative actually allows memory, make
		       things a bit cheaper since we won't need an
		       extra insn to load it.  */
		    pp->mem_cost
		      = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
			 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
			 - allows_mem[i]) * frequency;
		  /* If we have assigned a class to this allocno in
		     our first pass, add a cost to this alternative
		     corresponding to what we would add if this
		     allocno were not in the appropriate class.  */
		  if (pref)
		    {
		      enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];

		      if (pref_class == NO_REGS)
			{
			  if (op_class != NO_REGS)
			    alt_cost
			      += ((out_p
				   ? ira_memory_move_cost[mode][op_class][0]
				   : 0)
				  + (in_p
				     ? ira_memory_move_cost[mode][op_class][1]
				     : 0));
			}
		      else if (op_class == NO_REGS)
			alt_cost
			  += ((out_p
			       ? ira_memory_move_cost[mode][pref_class][1]
			       : 0)
			      + (in_p
				 ? ira_memory_move_cost[mode][pref_class][0]
				 : 0));
		      else if (ira_reg_class_intersect[pref_class][op_class]
			       == NO_REGS)
			alt_cost += (ira_register_move_cost
				     [mode][pref_class][op_class]);
		    }
		}
	    }

	  /* Otherwise, if this alternative wins, either because we
	     have already determined that or if we have a hard
	     register of the proper class, there is no cost for this
	     alternative.  */
	  else if (win || (REG_P (op)
			   && reg_fits_class_p (op, classes[i],
						0, GET_MODE (op))))
	    ;

	  /* If registers are valid, the cost of this alternative
	     includes copying the object to and/or from a
	     register.  */
	  else if (classes[i] != NO_REGS)
	    {
	      if (recog_data.operand_type[i] != OP_OUT)
		alt_cost += copy_cost (op, mode, classes[i], 1, NULL);

	      if (recog_data.operand_type[i] != OP_IN)
		alt_cost += copy_cost (op, mode, classes[i], 0, NULL);
	    }
	  /* The only other way this alternative can be used is if
	     this is a constant that could be placed into memory.  */
	  else if (CONSTANT_P (op) && (allows_addr || allows_mem[i]))
	    alt_cost += ira_memory_move_cost[mode][classes[i]][1];
	  else
	    alt_fail = 1;

	  if (alt_fail)
	    break;
	}

      if (alt_fail)
	{
	  /* The loop above might have exited early once the failure
	     was seen.  Skip over the constraints for the remaining
	     operands.  */
	  i += 1;
	  for (; i < n_ops; ++i)
	    constraints[i] = skip_alternative (constraints[i]);
	  continue;
	}

      op_cost_add = alt_cost * frequency;
      /* Finally, update the costs with the information we've
	 calculated about this alternative.  */
      for (i = 0; i < n_ops; i++)
	if (REG_P (ops[i]) && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
	  {
	    struct costs *pp = op_costs[i], *qq = this_op_costs[i];
	    int *pp_costs = pp->cost, *qq_costs = qq->cost;
	    int scale = 1 + (recog_data.operand_type[i] == OP_INOUT);
	    cost_classes_t cost_classes_ptr
	      = regno_cost_classes[REGNO (ops[i])];

	    pp->mem_cost = MIN (pp->mem_cost,
				(qq->mem_cost + op_cost_add) * scale);

	    for (k = cost_classes_ptr->num - 1; k >= 0; k--)
	      pp_costs[k]
		= MIN (pp_costs[k], (qq_costs[k] + op_cost_add) * scale);
	  }
    }

  /* An allocno appearing only where no alternative allows memory would
     need reloads if spilled, so mark it as a bad spill candidate.  */
  if (allocno_p)
    for (i = 0; i < n_ops; i++)
      {
	ira_allocno_t a;
	rtx op = ops[i];

	if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
	  continue;
	a = ira_curr_regno_allocno_map [REGNO (op)];
	if (! ALLOCNO_BAD_SPILL_P (a) && insn_allows_mem[i] == 0)
	  ALLOCNO_BAD_SPILL_P (a) = true;
      }

}
1053
1054 \f
1055
1056 /* Wrapper around REGNO_OK_FOR_INDEX_P, to allow pseudo registers. */
1057 static inline bool
1058 ok_for_index_p_nonstrict (rtx reg)
1059 {
1060 unsigned regno = REGNO (reg);
1061
1062 return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno);
1063 }
1064
1065 /* A version of regno_ok_for_base_p for use here, when all
1066 pseudo-registers should count as OK. Arguments as for
1067 regno_ok_for_base_p. */
1068 static inline bool
1069 ok_for_base_p_nonstrict (rtx reg, machine_mode mode, addr_space_t as,
1070 enum rtx_code outer_code, enum rtx_code index_code)
1071 {
1072 unsigned regno = REGNO (reg);
1073
1074 if (regno >= FIRST_PSEUDO_REGISTER)
1075 return true;
1076 return ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
1077 }
1078
/* Record the pseudo registers we must reload into hard registers in a
   subexpression of a memory address, X.

   If CONTEXT is 0, we are looking at the base part of an address,
   otherwise we are looking at the index part.

   MODE and AS are the mode and address space of the memory reference;
   OUTER_CODE and INDEX_CODE give the context that the rtx appears in.
   These four arguments are passed down to base_reg_class.

   SCALE is twice the amount to multiply the cost by (it is twice so
   we can represent half-cost adjustments).

   The function recurses over the address structure; the costs it
   records are accumulated (with saturation at INT_MAX) into the
   global `costs' array for each pseudo found.  */
static void
record_address_regs (machine_mode mode, addr_space_t as, rtx x,
		     int context, enum rtx_code outer_code,
		     enum rtx_code index_code, int scale)
{
  enum rtx_code code = GET_CODE (x);
  enum reg_class rclass;

  /* The class a register appearing here would have to belong to:
     index class when in an index context, otherwise whatever
     base_reg_class says for this address shape.  */
  if (context == 1)
    rclass = INDEX_REG_CLASS;
  else
    rclass = base_reg_class (mode, as, outer_code, index_code);

  switch (code)
    {
    /* Constants contribute no register costs.  */
    case CONST_INT:
    case CONST:
    case PC:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case PLUS:
      /* When we have an address that is a sum, we must determine
	 whether registers are "base" or "index" regs.  If there is a
	 sum of two registers, we must choose one to be the "base".
	 Luckily, we can use the REG_POINTER to make a good choice
	 most of the time.  We only need to do this on machines that
	 can have two registers in an address and where the base and
	 index register classes are different.

	 ??? This code used to set REGNO_POINTER_FLAG in some cases,
	 but that seems bogus since it should only be set when we are
	 sure the register is being used as a pointer.  */
      {
	rtx arg0 = XEXP (x, 0);
	rtx arg1 = XEXP (x, 1);
	enum rtx_code code0 = GET_CODE (arg0);
	enum rtx_code code1 = GET_CODE (arg1);

	/* Look inside subregs.  */
	if (code0 == SUBREG)
	  arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0);
	if (code1 == SUBREG)
	  arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1);

	/* If index registers do not appear, or coincide with base registers,
	   just record registers in any non-constant operands.  We
	   assume here, as well as in the tests below, that all
	   addresses are in canonical form.  */
	if (MAX_REGS_PER_ADDRESS == 1
	    || INDEX_REG_CLASS == base_reg_class (VOIDmode, as, PLUS, SCRATCH))
	  {
	    record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
	    if (! CONSTANT_P (arg1))
	      record_address_regs (mode, as, arg1, context, PLUS, code0, scale);
	  }

	/* If the second operand is a constant integer, it doesn't
	   change what class the first operand must be.  */
	else if (CONST_SCALAR_INT_P (arg1))
	  record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
	/* If the second operand is a symbolic constant, the first
	   operand must be an index register.  */
	else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF)
	  record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
	/* If both operands are registers but one is already a hard
	   register of index or reg-base class, give the other the
	   class that the hard register is not.  */
	else if (code0 == REG && code1 == REG
		 && REGNO (arg0) < FIRST_PSEUDO_REGISTER
		 && (ok_for_base_p_nonstrict (arg0, mode, as, PLUS, REG)
		     || ok_for_index_p_nonstrict (arg0)))
	  record_address_regs (mode, as, arg1,
			       ok_for_base_p_nonstrict (arg0, mode, as,
							PLUS, REG) ? 1 : 0,
			       PLUS, REG, scale);
	else if (code0 == REG && code1 == REG
		 && REGNO (arg1) < FIRST_PSEUDO_REGISTER
		 && (ok_for_base_p_nonstrict (arg1, mode, as, PLUS, REG)
		     || ok_for_index_p_nonstrict (arg1)))
	  record_address_regs (mode, as, arg0,
			       ok_for_base_p_nonstrict (arg1, mode, as,
							PLUS, REG) ? 1 : 0,
			       PLUS, REG, scale);
	/* If one operand is known to be a pointer, it must be the
	   base with the other operand the index.  Likewise if the
	   other operand is a MULT.  */
	else if ((code0 == REG && REG_POINTER (arg0)) || code1 == MULT)
	  {
	    record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
	    record_address_regs (mode, as, arg1, 1, PLUS, code0, scale);
	  }
	else if ((code1 == REG && REG_POINTER (arg1)) || code0 == MULT)
	  {
	    record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
	    record_address_regs (mode, as, arg1, 0, PLUS, code0, scale);
	  }
	/* Otherwise, count equal chances that each might be a base or
	   index register.  This case should be rare.  Each of the
	   four calls uses half the scale, so the total weight per
	   operand stays the same.  */
	else
	  {
	    record_address_regs (mode, as, arg0, 0, PLUS, code1, scale / 2);
	    record_address_regs (mode, as, arg0, 1, PLUS, code1, scale / 2);
	    record_address_regs (mode, as, arg1, 0, PLUS, code0, scale / 2);
	    record_address_regs (mode, as, arg1, 1, PLUS, code0, scale / 2);
	  }
      }
      break;

    /* Double the importance of an allocno that is incremented or
       decremented, since it would take two extra insns if it ends
       up in the wrong place.  */
    case POST_MODIFY:
    case PRE_MODIFY:
      record_address_regs (mode, as, XEXP (x, 0), 0, code,
			   GET_CODE (XEXP (XEXP (x, 1), 1)), 2 * scale);
      if (REG_P (XEXP (XEXP (x, 1), 1)))
	record_address_regs (mode, as, XEXP (XEXP (x, 1), 1), 1, code, REG,
			     2 * scale);
      break;

    case POST_INC:
    case PRE_INC:
    case POST_DEC:
    case PRE_DEC:
      /* Double the importance of an allocno that is incremented or
	 decremented, since it would take two extra insns if it ends
	 up in the wrong place.  */
      record_address_regs (mode, as, XEXP (x, 0), 0, code, SCRATCH, 2 * scale);
      break;

    case REG:
      {
	struct costs *pp;
	int *pp_costs;
	enum reg_class i;
	int k, regno, add_cost;
	cost_classes_t cost_classes_ptr;
	enum reg_class *cost_classes;
	move_table *move_in_cost;

	/* Hard registers need no cost bookkeeping here.  */
	if (REGNO (x) < FIRST_PSEUDO_REGISTER)
	  break;

	regno = REGNO (x);
	/* Mark the pseudo as a bad spill candidate: it is used inside
	   an address.  */
	if (allocno_p)
	  ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[regno]) = true;
	pp = COSTS (costs, COST_INDEX (regno));
	/* SCALE is twice the real factor, hence the division by 2.
	   Saturate at INT_MAX instead of overflowing (signed overflow
	   would be undefined behavior).  */
	add_cost = (ira_memory_move_cost[Pmode][rclass][1] * scale) / 2;
	if (INT_MAX - add_cost < pp->mem_cost)
	  pp->mem_cost = INT_MAX;
	else
	  pp->mem_cost += add_cost;
	cost_classes_ptr = regno_cost_classes[regno];
	cost_classes = cost_classes_ptr->classes;
	pp_costs = pp->cost;
	ira_init_register_move_cost_if_necessary (Pmode);
	move_in_cost = ira_may_move_in_cost[Pmode];
	/* Charge each candidate class the cost of moving it into the
	   class required at this address position, again saturating.  */
	for (k = cost_classes_ptr->num - 1; k >= 0; k--)
	  {
	    i = cost_classes[k];
	    add_cost = (move_in_cost[i][rclass] * scale) / 2;
	    if (INT_MAX - add_cost < pp_costs[k])
	      pp_costs[k] = INT_MAX;
	    else
	      pp_costs[k] += add_cost;
	  }
      }
      break;

    default:
      /* Recurse into every rtx subexpression of X, keeping the same
	 context.  */
      {
	const char *fmt = GET_RTX_FORMAT (code);
	int i;
	for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	  if (fmt[i] == 'e')
	    record_address_regs (mode, as, XEXP (x, i), context, code, SCRATCH,
				 scale);
      }
    }
}
1273
1274 \f
1275
/* Calculate the costs of insn operands.  INSN is the insn being
   scanned; PREF, if non-NULL, is the array of preferred classes found
   by an earlier pass (indexed through COST_INDEX).  Results go into
   op_costs[].  A single-set copy between a pseudo and a hard register
   is special-cased and handled entirely here; all other insns are
   delegated to record_reg_classes (twice for commutative operand
   pairs).  */
static void
record_operand_costs (rtx_insn *insn, enum reg_class *pref)
{
  const char *constraints[MAX_RECOG_OPERANDS];
  machine_mode modes[MAX_RECOG_OPERANDS];
  rtx set;
  int i;

  if ((set = single_set (insn)) != NULL_RTX
      /* In rare cases the single set insn might have fewer than 2
	 operands as the source can be a fixed special reg.  */
      && recog_data.n_operands > 1
      && recog_data.operand[0] == SET_DEST (set)
      && recog_data.operand[1] == SET_SRC (set))
    {
      int regno, other_regno;
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);

      /* Look through same-size (lowpart) subregs on either side.  */
      if (GET_CODE (dest) == SUBREG
	  && known_eq (GET_MODE_SIZE (GET_MODE (dest)),
		       GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))
	dest = SUBREG_REG (dest);
      if (GET_CODE (src) == SUBREG
	  && known_eq (GET_MODE_SIZE (GET_MODE (src)),
		       GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
	src = SUBREG_REG (src);
      /* Special-case a copy where exactly one side is a pseudo
	 (REGNO) and the other a hard register (OTHER_REGNO).  */
      if (REG_P (src) && REG_P (dest)
	  && (((regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
	       && (other_regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER)
	      || ((regno = REGNO (dest)) >= FIRST_PSEUDO_REGISTER
		  && (other_regno = REGNO (src)) < FIRST_PSEUDO_REGISTER)))
	{
	  machine_mode mode = GET_MODE (SET_SRC (set));
	  cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
	  enum reg_class *cost_classes = cost_classes_ptr->classes;
	  reg_class_t rclass, hard_reg_class, pref_class, bigger_hard_reg_class;
	  int cost, k;
	  move_table *move_costs;
	  /* Whether the source register dies in this insn.  */
	  bool dead_p = find_regno_note (insn, REG_DEAD, REGNO (src));

	  ira_init_register_move_cost_if_necessary (mode);
	  move_costs = ira_register_move_cost[mode];
	  hard_reg_class = REGNO_REG_CLASS (other_regno);
	  bigger_hard_reg_class = ira_pressure_class_translate[hard_reg_class];
	  /* Target code may return any cost for mode which does not
	     fit the hard reg class (e.g. DImode for AREG on
	     i386).  Check this and use a bigger class to get the
	     right cost.  */
	  if (bigger_hard_reg_class != NO_REGS
	      && ! ira_hard_reg_in_set_p (other_regno, mode,
					  reg_class_contents[hard_reg_class]))
	    hard_reg_class = bigger_hard_reg_class;
	  /* I is the operand number of the pseudo: 1 if the pseudo is
	     the source, 0 if it is the destination.  */
	  i = regno == (int) REGNO (src) ? 1 : 0;
	  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
	    {
	      rclass = cost_classes[k];
	      /* Direction of the move cost depends on which side the
		 pseudo is on.  */
	      cost = (i == 0
		      ? move_costs[hard_reg_class][rclass]
		      : move_costs[rclass][hard_reg_class]);

	      op_costs[i]->cost[k] = cost * frequency;
	      /* If we have assigned a class to this allocno in our
		 first pass, add a cost to this alternative
		 corresponding to what we would add if this allocno
		 were not in the appropriate class.  */
	      if (pref)
		{
		  /* NOTE(review): FREQUENCY multiplies only the
		     second ternary term below due to the
		     parenthesization; the analogous code elsewhere in
		     this file scales the whole sum.  Confirm whether
		     that asymmetry is intended.  */
		  if ((pref_class = pref[COST_INDEX (regno)]) == NO_REGS)
		    op_costs[i]->cost[k]
		      += ((i == 0 ? ira_memory_move_cost[mode][rclass][0] : 0)
			  + (i == 1 ? ira_memory_move_cost[mode][rclass][1] : 0)
			  * frequency);
		  else if (ira_reg_class_intersect[pref_class][rclass]
			   == NO_REGS)
		    op_costs[i]->cost[k]
		      += (move_costs[pref_class][rclass]
			  * frequency);
		}
	      /* If this insn is a single set copying operand 1 to
		 operand 0 and one operand is an allocno with the
		 other a hard reg or an allocno that prefers a hard
		 register that is in its own register class then we
		 may want to adjust the cost of that register class to
		 -1.

		 Avoid the adjustment if the source does not die to
		 avoid stressing of register allocator by preferencing
		 two colliding registers into single class.  */
	      if (dead_p
		  && TEST_HARD_REG_BIT (reg_class_contents[rclass], other_regno)
		  && (reg_class_size[(int) rclass]
		      == (ira_reg_class_max_nregs
			  [(int) rclass][(int) GET_MODE(src)])))
		{
		  if (reg_class_size[rclass] == 1)
		    op_costs[i]->cost[k] = -frequency;
		  else if (in_hard_reg_set_p (reg_class_contents[rclass],
					      GET_MODE(src), other_regno))
		    op_costs[i]->cost[k] = -frequency;
		}
	    }
	  /* Memory cost of the copy: storing/loading the pseudo in the
	     appropriate direction, plus the preferred-class penalty.  */
	  op_costs[i]->mem_cost
	    = ira_memory_move_cost[mode][hard_reg_class][i] * frequency;
	  if (pref && (pref_class = pref[COST_INDEX (regno)]) != NO_REGS)
	    op_costs[i]->mem_cost
	      += ira_memory_move_cost[mode][pref_class][i] * frequency;
	  return;
	}
    }

  for (i = 0; i < recog_data.n_operands; i++)
    {
      constraints[i] = recog_data.constraints[i];
      modes[i] = recog_data.operand_mode[i];
    }

  /* If we get here, we are set up to record the costs of all the
     operands for this insn.  Start by initializing the costs.  Then
     handle any address registers.  Finally record the desired classes
     for any allocnos, doing it twice if some pair of operands are
     commutative.  */
  for (i = 0; i < recog_data.n_operands; i++)
    {
      rtx op_mem = extract_mem_from_operand (recog_data.operand[i]);
      memcpy (op_costs[i], init_cost, struct_costs_size);

      if (GET_CODE (recog_data.operand[i]) == SUBREG)
	recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]);

      /* Account for registers used inside a memory address or an
	 address operand ('p' or target address constraint).  */
      if (MEM_P (op_mem))
	record_address_regs (GET_MODE (op_mem),
			     MEM_ADDR_SPACE (op_mem),
			     XEXP (op_mem, 0),
			     0, MEM, SCRATCH, frequency * 2);
      else if (constraints[i][0] == 'p'
	       || (insn_extra_address_constraint
		   (lookup_constraint (constraints[i]))))
	record_address_regs (VOIDmode, ADDR_SPACE_GENERIC,
			     recog_data.operand[i], 0, ADDRESS, SCRATCH,
			     frequency * 2);
    }

  /* Check for commutative in a separate loop so everything will have
     been initialized.  We must do this even if one operand is a
     constant--see addsi3 in m68k.md.  */
  for (i = 0; i < (int) recog_data.n_operands - 1; i++)
    if (constraints[i][0] == '%')
      {
	const char *xconstraints[MAX_RECOG_OPERANDS];
	int j;

	/* Handle commutative operands by swapping the
	   constraints.  We assume the modes are the same.  */
	for (j = 0; j < recog_data.n_operands; j++)
	  xconstraints[j] = constraints[j];

	xconstraints[i] = constraints[i+1];
	xconstraints[i+1] = constraints[i];
	record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
			    recog_data.operand, modes,
			    xconstraints, insn, pref);
      }
  record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
		      recog_data.operand, modes,
		      constraints, insn, pref);
}
1444
1445 \f
1446
/* Process one insn INSN.  Scan it and record each time it would save
   code to put a certain allocnos in a certain class.  Return the last
   insn processed, so that the scan can be continued from there.
   Per-operand costs are computed by record_operand_costs into
   op_costs[] and then folded (with saturation at INT_MAX) into the
   global `costs' accumulators here.  */
static rtx_insn *
scan_one_insn (rtx_insn *insn)
{
  enum rtx_code pat_code;
  rtx set, note;
  int i, k;
  bool counted_mem;

  /* Ignore notes, barriers, debug insns, etc.  */
  if (!NONDEBUG_INSN_P (insn))
    return insn;

  pat_code = GET_CODE (PATTERN (insn));
  if (pat_code == ASM_INPUT)
    return insn;

  /* If INSN is a USE/CLOBBER of a pseudo in a mode M then go ahead
     and initialize the register move costs of mode M.

     The pseudo may be related to another pseudo via a copy (implicit or
     explicit) and if there are no mode M uses/sets of the original
     pseudo, then we may leave the register move costs uninitialized for
     mode M. */
  if (pat_code == USE || pat_code == CLOBBER)
    {
      rtx x = XEXP (PATTERN (insn), 0);
      if (GET_CODE (x) == REG
	  && REGNO (x) >= FIRST_PSEUDO_REGISTER
	  && have_regs_of_mode[GET_MODE (x)])
	ira_init_register_move_cost_if_necessary (GET_MODE (x));
      return insn;
    }

  counted_mem = false;
  set = single_set (insn);
  extract_insn (insn);

  /* If this insn loads a parameter from its stack slot, then it
     represents a savings, rather than a cost, if the parameter is
     stored in memory.  Record this fact.

     Similarly if we're loading other constants from memory (constant
     pool, TOC references, small data areas, etc) and this is the only
     assignment to the destination pseudo.

     Don't do this if SET_SRC (set) isn't a general operand, if it is
     a memory requiring special instructions to load it, decreasing
     mem_cost might result in it being loaded using the specialized
     instruction into a register, then stored into stack and loaded
     again from the stack.  See PR52208.

     Don't do this if SET_SRC (set) has side effect.  See PR56124.  */
  if (set != 0 && REG_P (SET_DEST (set)) && MEM_P (SET_SRC (set))
      && (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL_RTX
      && ((MEM_P (XEXP (note, 0))
	   && !side_effects_p (SET_SRC (set)))
	  || (CONSTANT_P (XEXP (note, 0))
	      && targetm.legitimate_constant_p (GET_MODE (SET_DEST (set)),
						XEXP (note, 0))
	      && REG_N_SETS (REGNO (SET_DEST (set))) == 1))
      && general_operand (SET_SRC (set), GET_MODE (SET_SRC (set)))
      /* LRA does not use equiv with a symbol for PIC code.  */
      && (! ira_use_lra_p || ! pic_offset_table_rtx
	  || ! contains_symbol_ref_p (XEXP (note, 0))))
    {
      enum reg_class cl = GENERAL_REGS;
      rtx reg = SET_DEST (set);
      int num = COST_INDEX (REGNO (reg));

      /* Credit (not charge) the memory cost: keeping the pseudo in
	 memory is free here because the value is already there.  */
      COSTS (costs, num)->mem_cost
	-= ira_memory_move_cost[GET_MODE (reg)][cl][1] * frequency;
      record_address_regs (GET_MODE (SET_SRC (set)),
			   MEM_ADDR_SPACE (SET_SRC (set)),
			   XEXP (SET_SRC (set), 0), 0, MEM, SCRATCH,
			   frequency * 2);
      counted_mem = true;
    }

  record_operand_costs (insn, pref);

  /* Now add the cost for each operand to the total costs for its
     allocno.  */
  for (i = 0; i < recog_data.n_operands; i++)
    {
      rtx op = recog_data.operand[i];

      if (GET_CODE (op) == SUBREG)
	op = SUBREG_REG (op);
      if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
	{
	  int regno = REGNO (op);
	  struct costs *p = COSTS (costs, COST_INDEX (regno));
	  struct costs *q = op_costs[i];
	  int *p_costs = p->cost, *q_costs = q->cost;
	  cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
	  int add_cost;

	  /* If we already accounted for the memory "cost" above, don't
	     do so again.  */
	  if (!counted_mem)
	    {
	      /* Saturate at INT_MAX rather than overflowing.  */
	      add_cost = q->mem_cost;
	      if (add_cost > 0 && INT_MAX - add_cost < p->mem_cost)
		p->mem_cost = INT_MAX;
	      else
		p->mem_cost += add_cost;
	    }
	  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
	    {
	      add_cost = q_costs[k];
	      if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
		p_costs[k] = INT_MAX;
	      else
		p_costs[k] += add_cost;
	    }
	}
    }
  return insn;
}
1568
1569 \f
1570
1571 /* Print allocnos costs to file F. */
1572 static void
1573 print_allocno_costs (FILE *f)
1574 {
1575 int k;
1576 ira_allocno_t a;
1577 ira_allocno_iterator ai;
1578
1579 ira_assert (allocno_p);
1580 fprintf (f, "\n");
1581 FOR_EACH_ALLOCNO (a, ai)
1582 {
1583 int i, rclass;
1584 basic_block bb;
1585 int regno = ALLOCNO_REGNO (a);
1586 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1587 enum reg_class *cost_classes = cost_classes_ptr->classes;
1588
1589 i = ALLOCNO_NUM (a);
1590 fprintf (f, " a%d(r%d,", i, regno);
1591 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1592 fprintf (f, "b%d", bb->index);
1593 else
1594 fprintf (f, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1595 fprintf (f, ") costs:");
1596 for (k = 0; k < cost_classes_ptr->num; k++)
1597 {
1598 rclass = cost_classes[k];
1599 fprintf (f, " %s:%d", reg_class_names[rclass],
1600 COSTS (costs, i)->cost[k]);
1601 if (flag_ira_region == IRA_REGION_ALL
1602 || flag_ira_region == IRA_REGION_MIXED)
1603 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->cost[k]);
1604 }
1605 fprintf (f, " MEM:%i", COSTS (costs, i)->mem_cost);
1606 if (flag_ira_region == IRA_REGION_ALL
1607 || flag_ira_region == IRA_REGION_MIXED)
1608 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->mem_cost);
1609 fprintf (f, "\n");
1610 }
1611 }
1612
1613 /* Print pseudo costs to file F. */
1614 static void
1615 print_pseudo_costs (FILE *f)
1616 {
1617 int regno, k;
1618 int rclass;
1619 cost_classes_t cost_classes_ptr;
1620 enum reg_class *cost_classes;
1621
1622 ira_assert (! allocno_p);
1623 fprintf (f, "\n");
1624 for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
1625 {
1626 if (REG_N_REFS (regno) <= 0)
1627 continue;
1628 cost_classes_ptr = regno_cost_classes[regno];
1629 cost_classes = cost_classes_ptr->classes;
1630 fprintf (f, " r%d costs:", regno);
1631 for (k = 0; k < cost_classes_ptr->num; k++)
1632 {
1633 rclass = cost_classes[k];
1634 fprintf (f, " %s:%d", reg_class_names[rclass],
1635 COSTS (costs, regno)->cost[k]);
1636 }
1637 fprintf (f, " MEM:%i\n", COSTS (costs, regno)->mem_cost);
1638 }
1639 }
1640
1641 /* Traverse the BB represented by LOOP_TREE_NODE to update the allocno
1642 costs. */
1643 static void
1644 process_bb_for_costs (basic_block bb)
1645 {
1646 rtx_insn *insn;
1647
1648 frequency = REG_FREQ_FROM_BB (bb);
1649 if (frequency == 0)
1650 frequency = 1;
1651 FOR_BB_INSNS (bb, insn)
1652 insn = scan_one_insn (insn);
1653 }
1654
1655 /* Traverse the BB represented by LOOP_TREE_NODE to update the allocno
1656 costs. */
1657 static void
1658 process_bb_node_for_costs (ira_loop_tree_node_t loop_tree_node)
1659 {
1660 basic_block bb;
1661
1662 bb = loop_tree_node->bb;
1663 if (bb != NULL)
1664 process_bb_for_costs (bb);
1665 }
1666
1667 /* Find costs of register classes and memory for allocnos or pseudos
1668 and their best costs. Set up preferred, alternative and allocno
1669 classes for pseudos. */
1670 static void
1671 find_costs_and_classes (FILE *dump_file)
1672 {
1673 int i, k, start, max_cost_classes_num;
1674 int pass;
1675 basic_block bb;
1676 enum reg_class *regno_best_class, new_class;
1677
1678 init_recog ();
1679 regno_best_class
1680 = (enum reg_class *) ira_allocate (max_reg_num ()
1681 * sizeof (enum reg_class));
1682 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1683 regno_best_class[i] = NO_REGS;
1684 if (!resize_reg_info () && allocno_p
1685 && pseudo_classes_defined_p && flag_expensive_optimizations)
1686 {
1687 ira_allocno_t a;
1688 ira_allocno_iterator ai;
1689
1690 pref = pref_buffer;
1691 max_cost_classes_num = 1;
1692 FOR_EACH_ALLOCNO (a, ai)
1693 {
1694 pref[ALLOCNO_NUM (a)] = reg_preferred_class (ALLOCNO_REGNO (a));
1695 setup_regno_cost_classes_by_aclass
1696 (ALLOCNO_REGNO (a), pref[ALLOCNO_NUM (a)]);
1697 max_cost_classes_num
1698 = MAX (max_cost_classes_num,
1699 regno_cost_classes[ALLOCNO_REGNO (a)]->num);
1700 }
1701 start = 1;
1702 }
1703 else
1704 {
1705 pref = NULL;
1706 max_cost_classes_num = ira_important_classes_num;
1707 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1708 if (regno_reg_rtx[i] != NULL_RTX)
1709 setup_regno_cost_classes_by_mode (i, PSEUDO_REGNO_MODE (i));
1710 else
1711 setup_regno_cost_classes_by_aclass (i, ALL_REGS);
1712 start = 0;
1713 }
1714 if (allocno_p)
1715 /* Clear the flag for the next compiled function. */
1716 pseudo_classes_defined_p = false;
1717 /* Normally we scan the insns once and determine the best class to
1718 use for each allocno. However, if -fexpensive-optimizations are
1719 on, we do so twice, the second time using the tentative best
1720 classes to guide the selection. */
1721 for (pass = start; pass <= flag_expensive_optimizations; pass++)
1722 {
1723 if ((!allocno_p || internal_flag_ira_verbose > 0) && dump_file)
1724 fprintf (dump_file,
1725 "\nPass %i for finding pseudo/allocno costs\n\n", pass);
1726
1727 if (pass != start)
1728 {
1729 max_cost_classes_num = 1;
1730 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1731 {
1732 setup_regno_cost_classes_by_aclass (i, regno_best_class[i]);
1733 max_cost_classes_num
1734 = MAX (max_cost_classes_num, regno_cost_classes[i]->num);
1735 }
1736 }
1737
1738 struct_costs_size
1739 = sizeof (struct costs) + sizeof (int) * (max_cost_classes_num - 1);
1740 /* Zero out our accumulation of the cost of each class for each
1741 allocno. */
1742 memset (costs, 0, cost_elements_num * struct_costs_size);
1743
1744 if (allocno_p)
1745 {
1746 /* Scan the instructions and record each time it would save code
1747 to put a certain allocno in a certain class. */
1748 ira_traverse_loop_tree (true, ira_loop_tree_root,
1749 process_bb_node_for_costs, NULL);
1750
1751 memcpy (total_allocno_costs, costs,
1752 max_struct_costs_size * ira_allocnos_num);
1753 }
1754 else
1755 {
1756 basic_block bb;
1757
1758 FOR_EACH_BB_FN (bb, cfun)
1759 process_bb_for_costs (bb);
1760 }
1761
1762 if (pass == 0)
1763 pref = pref_buffer;
1764
1765 /* Now for each allocno look at how desirable each class is and
1766 find which class is preferred. */
1767 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1768 {
1769 ira_allocno_t a, parent_a;
1770 int rclass, a_num, parent_a_num, add_cost;
1771 ira_loop_tree_node_t parent;
1772 int best_cost, allocno_cost;
1773 enum reg_class best, alt_class;
1774 cost_classes_t cost_classes_ptr = regno_cost_classes[i];
1775 enum reg_class *cost_classes;
1776 int *i_costs = temp_costs->cost;
1777 int i_mem_cost;
1778 int equiv_savings = regno_equiv_gains[i];
1779
1780 if (! allocno_p)
1781 {
1782 if (regno_reg_rtx[i] == NULL_RTX)
1783 continue;
1784 memcpy (temp_costs, COSTS (costs, i), struct_costs_size);
1785 i_mem_cost = temp_costs->mem_cost;
1786 cost_classes = cost_classes_ptr->classes;
1787 }
1788 else
1789 {
1790 if (ira_regno_allocno_map[i] == NULL)
1791 continue;
1792 memset (temp_costs, 0, struct_costs_size);
1793 i_mem_cost = 0;
1794 cost_classes = cost_classes_ptr->classes;
1795 /* Find cost of all allocnos with the same regno. */
1796 for (a = ira_regno_allocno_map[i];
1797 a != NULL;
1798 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1799 {
1800 int *a_costs, *p_costs;
1801
1802 a_num = ALLOCNO_NUM (a);
1803 if ((flag_ira_region == IRA_REGION_ALL
1804 || flag_ira_region == IRA_REGION_MIXED)
1805 && (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
1806 && (parent_a = parent->regno_allocno_map[i]) != NULL
1807 /* There are no caps yet. */
1808 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE
1809 (a)->border_allocnos,
1810 ALLOCNO_NUM (a)))
1811 {
1812 /* Propagate costs to upper levels in the region
1813 tree. */
1814 parent_a_num = ALLOCNO_NUM (parent_a);
1815 a_costs = COSTS (total_allocno_costs, a_num)->cost;
1816 p_costs = COSTS (total_allocno_costs, parent_a_num)->cost;
1817 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1818 {
1819 add_cost = a_costs[k];
1820 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1821 p_costs[k] = INT_MAX;
1822 else
1823 p_costs[k] += add_cost;
1824 }
1825 add_cost = COSTS (total_allocno_costs, a_num)->mem_cost;
1826 if (add_cost > 0
1827 && (INT_MAX - add_cost
1828 < COSTS (total_allocno_costs,
1829 parent_a_num)->mem_cost))
1830 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1831 = INT_MAX;
1832 else
1833 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1834 += add_cost;
1835
1836 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1837 COSTS (total_allocno_costs, parent_a_num)->mem_cost = 0;
1838 }
1839 a_costs = COSTS (costs, a_num)->cost;
1840 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1841 {
1842 add_cost = a_costs[k];
1843 if (add_cost > 0 && INT_MAX - add_cost < i_costs[k])
1844 i_costs[k] = INT_MAX;
1845 else
1846 i_costs[k] += add_cost;
1847 }
1848 add_cost = COSTS (costs, a_num)->mem_cost;
1849 if (add_cost > 0 && INT_MAX - add_cost < i_mem_cost)
1850 i_mem_cost = INT_MAX;
1851 else
1852 i_mem_cost += add_cost;
1853 }
1854 }
1855 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1856 i_mem_cost = 0;
1857 else if (equiv_savings < 0)
1858 i_mem_cost = -equiv_savings;
1859 else if (equiv_savings > 0)
1860 {
1861 i_mem_cost = 0;
1862 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1863 i_costs[k] += equiv_savings;
1864 }
1865
1866 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1867 best = ALL_REGS;
1868 alt_class = NO_REGS;
1869 /* Find best common class for all allocnos with the same
1870 regno. */
1871 for (k = 0; k < cost_classes_ptr->num; k++)
1872 {
1873 rclass = cost_classes[k];
1874 if (i_costs[k] < best_cost)
1875 {
1876 best_cost = i_costs[k];
1877 best = (enum reg_class) rclass;
1878 }
1879 else if (i_costs[k] == best_cost)
1880 best = ira_reg_class_subunion[best][rclass];
1881 if (pass == flag_expensive_optimizations
1882 /* We still prefer registers to memory even at this
1883 stage if their costs are the same. We will make
1884 a final decision during assigning hard registers
1885 when we have all info including more accurate
1886 costs which might be affected by assigning hard
1887 registers to other pseudos because the pseudos
1888 involved in moves can be coalesced. */
1889 && i_costs[k] <= i_mem_cost
1890 && (reg_class_size[reg_class_subunion[alt_class][rclass]]
1891 > reg_class_size[alt_class]))
1892 alt_class = reg_class_subunion[alt_class][rclass];
1893 }
1894 alt_class = ira_allocno_class_translate[alt_class];
1895 if (best_cost > i_mem_cost
1896 && ! non_spilled_static_chain_regno_p (i))
1897 regno_aclass[i] = NO_REGS;
1898 else if (!optimize && !targetm.class_likely_spilled_p (best))
1899 /* Registers in the alternative class are likely to need
1900 longer or slower sequences than registers in the best class.
1901 When optimizing we make some effort to use the best class
1902 over the alternative class where possible, but at -O0 we
1903 effectively give the alternative class equal weight.
1904 We then run the risk of using slower alternative registers
1905 when plenty of registers from the best class are still free.
1906 This is especially true because live ranges tend to be very
1907 short in -O0 code and so register pressure tends to be low.
1908
1909 Avoid that by ignoring the alternative class if the best
1910 class has plenty of registers.
1911
1912 The union class arrays give important classes and only
1913 part of it are allocno classes. So translate them into
1914 allocno classes. */
1915 regno_aclass[i] = ira_allocno_class_translate[best];
1916 else
1917 {
1918 /* Make the common class the biggest class of best and
1919 alt_class. Translate the common class into an
1920 allocno class too. */
1921 regno_aclass[i] = (ira_allocno_class_translate
1922 [ira_reg_class_superunion[best][alt_class]]);
1923 ira_assert (regno_aclass[i] != NO_REGS
1924 && ira_reg_allocno_class_p[regno_aclass[i]]);
1925 }
1926 if ((new_class
1927 = (reg_class) (targetm.ira_change_pseudo_allocno_class
1928 (i, regno_aclass[i], best))) != regno_aclass[i])
1929 {
1930 regno_aclass[i] = new_class;
1931 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1932 reg_class_contents[best]))
1933 best = new_class;
1934 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1935 reg_class_contents[alt_class]))
1936 alt_class = new_class;
1937 }
1938 if (pass == flag_expensive_optimizations)
1939 {
1940 if (best_cost > i_mem_cost
1941 /* Do not assign NO_REGS to static chain pointer
1942 pseudo when non-local goto is used. */
1943 && ! non_spilled_static_chain_regno_p (i))
1944 best = alt_class = NO_REGS;
1945 else if (best == alt_class)
1946 alt_class = NO_REGS;
1947 setup_reg_classes (i, best, alt_class, regno_aclass[i]);
1948 if ((!allocno_p || internal_flag_ira_verbose > 2)
1949 && dump_file != NULL)
1950 fprintf (dump_file,
1951 " r%d: preferred %s, alternative %s, allocno %s\n",
1952 i, reg_class_names[best], reg_class_names[alt_class],
1953 reg_class_names[regno_aclass[i]]);
1954 }
1955 regno_best_class[i] = best;
1956 if (! allocno_p)
1957 {
1958 pref[i] = (best_cost > i_mem_cost
1959 && ! non_spilled_static_chain_regno_p (i)
1960 ? NO_REGS : best);
1961 continue;
1962 }
1963 for (a = ira_regno_allocno_map[i];
1964 a != NULL;
1965 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1966 {
1967 enum reg_class aclass = regno_aclass[i];
1968 int a_num = ALLOCNO_NUM (a);
1969 int *total_a_costs = COSTS (total_allocno_costs, a_num)->cost;
1970 int *a_costs = COSTS (costs, a_num)->cost;
1971
1972 if (aclass == NO_REGS)
1973 best = NO_REGS;
1974 else
1975 {
1976 /* Finding best class which is subset of the common
1977 class. */
1978 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1979 allocno_cost = best_cost;
1980 best = ALL_REGS;
1981 for (k = 0; k < cost_classes_ptr->num; k++)
1982 {
1983 rclass = cost_classes[k];
1984 if (! ira_class_subset_p[rclass][aclass])
1985 continue;
1986 if (total_a_costs[k] < best_cost)
1987 {
1988 best_cost = total_a_costs[k];
1989 allocno_cost = a_costs[k];
1990 best = (enum reg_class) rclass;
1991 }
1992 else if (total_a_costs[k] == best_cost)
1993 {
1994 best = ira_reg_class_subunion[best][rclass];
1995 allocno_cost = MAX (allocno_cost, a_costs[k]);
1996 }
1997 }
1998 ALLOCNO_CLASS_COST (a) = allocno_cost;
1999 }
2000 if (internal_flag_ira_verbose > 2 && dump_file != NULL
2001 && (pass == 0 || pref[a_num] != best))
2002 {
2003 fprintf (dump_file, " a%d (r%d,", a_num, i);
2004 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
2005 fprintf (dump_file, "b%d", bb->index);
2006 else
2007 fprintf (dump_file, "l%d",
2008 ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
2009 fprintf (dump_file, ") best %s, allocno %s\n",
2010 reg_class_names[best],
2011 reg_class_names[aclass]);
2012 }
2013 pref[a_num] = best;
2014 if (pass == flag_expensive_optimizations && best != aclass
2015 && ira_class_hard_regs_num[best] > 0
2016 && (ira_reg_class_max_nregs[best][ALLOCNO_MODE (a)]
2017 >= ira_class_hard_regs_num[best]))
2018 {
2019 int ind = cost_classes_ptr->index[aclass];
2020
2021 ira_assert (ind >= 0);
2022 ira_init_register_move_cost_if_necessary (ALLOCNO_MODE (a));
2023 ira_add_allocno_pref (a, ira_class_hard_regs[best][0],
2024 (a_costs[ind] - ALLOCNO_CLASS_COST (a))
2025 / (ira_register_move_cost
2026 [ALLOCNO_MODE (a)][best][aclass]));
2027 for (k = 0; k < cost_classes_ptr->num; k++)
2028 if (ira_class_subset_p[cost_classes[k]][best])
2029 a_costs[k] = a_costs[ind];
2030 }
2031 }
2032 }
2033
2034 if (internal_flag_ira_verbose > 4 && dump_file)
2035 {
2036 if (allocno_p)
2037 print_allocno_costs (dump_file);
2038 else
2039 print_pseudo_costs (dump_file);
2040 fprintf (dump_file,"\n");
2041 }
2042 }
2043 ira_free (regno_best_class);
2044 }
2045
2046 \f
2047
/* Process moves involving hard regs to modify allocno hard register
   costs.  We can do this only after determining allocno class.  If a
   hard register forms a register class, then moves with the hard
   register are already taken into account in class costs for the
   allocno.  */
static void
process_bb_node_for_hard_reg_moves (ira_loop_tree_node_t loop_tree_node)
{
  int i, freq, src_regno, dst_regno, hard_regno, a_regno;
  bool to_p;
  ira_allocno_t a, curr_a;
  ira_loop_tree_node_t curr_loop_tree_node;
  enum reg_class rclass;
  basic_block bb;
  rtx_insn *insn;
  rtx set, src, dst;

  /* Only loop-tree nodes that represent a basic block are processed.  */
  bb = loop_tree_node->bb;
  if (bb == NULL)
    return;
  /* Use the block's execution frequency as the cost weight; never let
     it be zero so every move has some effect.  */
  freq = REG_FREQ_FROM_BB (bb);
  if (freq == 0)
    freq = 1;
  FOR_BB_INSNS (bb, insn)
    {
      if (!NONDEBUG_INSN_P (insn))
	continue;
      set = single_set (insn);
      if (set == NULL_RTX)
	continue;
      dst = SET_DEST (set);
      src = SET_SRC (set);
      /* We only care about plain register-to-register moves.  */
      if (! REG_P (dst) || ! REG_P (src))
	continue;
      dst_regno = REGNO (dst);
      src_regno = REGNO (src);
      /* Exactly one side must be a hard register and the other a
	 pseudo; TO_P records the direction (true = hard reg copied
	 into the pseudo).  */
      if (dst_regno >= FIRST_PSEUDO_REGISTER
	  && src_regno < FIRST_PSEUDO_REGISTER)
	{
	  hard_regno = src_regno;
	  a = ira_curr_regno_allocno_map[dst_regno];
	  to_p = true;
	}
      else if (src_regno >= FIRST_PSEUDO_REGISTER
	       && dst_regno < FIRST_PSEUDO_REGISTER)
	{
	  hard_regno = dst_regno;
	  a = ira_curr_regno_allocno_map[src_regno];
	  to_p = false;
	}
      else
	continue;
      if (reg_class_size[(int) REGNO_REG_CLASS (hard_regno)]
	  == (ira_reg_class_max_nregs
	      [REGNO_REG_CLASS (hard_regno)][(int) ALLOCNO_MODE(a)]))
	/* If the class can provide only one hard reg to the allocno,
	   we processed the insn record_operand_costs already and we
	   actually updated the hard reg cost there.  */
	continue;
      /* The hard register must belong to the allocno's class and have
	 a valid index within it, or it cannot be preferred.  */
      rclass = ALLOCNO_CLASS (a);
      if (! TEST_HARD_REG_BIT (reg_class_contents[rclass], hard_regno))
	continue;
      i = ira_class_hard_reg_index[rclass][hard_regno];
      if (i < 0)
	continue;
      /* Record the preference for this hard register on the allocno
	 and on all its parent allocnos up the loop tree.  */
      a_regno = ALLOCNO_REGNO (a);
      for (curr_loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
	   curr_loop_tree_node != NULL;
	   curr_loop_tree_node = curr_loop_tree_node->parent)
	if ((curr_a = curr_loop_tree_node->regno_allocno_map[a_regno]) != NULL)
	  ira_add_allocno_pref (curr_a, hard_regno, freq);
      {
	int cost;
	enum reg_class hard_reg_class;
	machine_mode mode;

	mode = ALLOCNO_MODE (a);
	hard_reg_class = REGNO_REG_CLASS (hard_regno);
	ira_init_register_move_cost_if_necessary (mode);
	/* The saved cost is the frequency-weighted move cost in the
	   direction the insn actually copies.  */
	cost = (to_p ? ira_register_move_cost[mode][hard_reg_class][rclass]
		: ira_register_move_cost[mode][rclass][hard_reg_class]) * freq;
	/* Make sure the per-hard-reg cost vectors exist (initialized
	   to the class cost resp. zero), then make HARD_REGNO look
	   cheaper for this allocno.  */
	ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), rclass,
				    ALLOCNO_CLASS_COST (a));
	ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				    rclass, 0);
	ALLOCNO_HARD_REG_COSTS (a)[i] -= cost;
	ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[i] -= cost;
	/* Keep the class cost equal to the minimum hard-reg cost.  */
	ALLOCNO_CLASS_COST (a) = MIN (ALLOCNO_CLASS_COST (a),
				      ALLOCNO_HARD_REG_COSTS (a)[i]);
      }
    }
}
2140
/* After we find hard register and memory costs for allocnos, define
   its class and modify hard register cost because insns moving
   allocno to/from hard registers.  */
static void
setup_allocno_class_and_costs (void)
{
  int i, j, n, regno, hard_regno, num;
  int *reg_costs;
  enum reg_class aclass, rclass;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  cost_classes_t cost_classes_ptr;

  /* This phase only runs in allocno mode (ira_costs), never for the
     pseudo-only ira_set_pseudo_classes path.  */
  ira_assert (allocno_p);
  FOR_EACH_ALLOCNO (a, ai)
    {
      i = ALLOCNO_NUM (a);
      regno = ALLOCNO_REGNO (a);
      aclass = regno_aclass[regno];
      cost_classes_ptr = regno_cost_classes[regno];
      /* A non-NO_REGS preference implies an allocatable class.  */
      ira_assert (pref[i] == NO_REGS || aclass != NO_REGS);
      ALLOCNO_MEMORY_COST (a) = COSTS (costs, i)->mem_cost;
      ira_set_allocno_class (a, aclass);
      if (aclass == NO_REGS)
	continue;
      /* When the preferred class is a strict subset of the allocno
	 class, build an explicit per-hard-reg cost vector so regs in
	 the preferred class look cheapest.  */
      if (optimize && ALLOCNO_CLASS (a) != pref[i])
	{
	  n = ira_class_hard_regs_num[aclass];
	  ALLOCNO_HARD_REG_COSTS (a)
	    = reg_costs = ira_allocate_cost_vector (aclass);
	  for (j = n - 1; j >= 0; j--)
	    {
	      hard_regno = ira_class_hard_regs[aclass][j];
	      if (TEST_HARD_REG_BIT (reg_class_contents[pref[i]], hard_regno))
		/* Regs inside the preferred class get the (cheapest)
		   class cost.  */
		reg_costs[j] = ALLOCNO_CLASS_COST (a);
	      else
		{
		  /* Otherwise take the cost computed for the reg's own
		     class; if that class was not among the cost
		     classes, fall back to the per-hard-regno index.  */
		  rclass = REGNO_REG_CLASS (hard_regno);
		  num = cost_classes_ptr->index[rclass];
		  if (num < 0)
		    {
		      num = cost_classes_ptr->hard_regno_index[hard_regno];
		      ira_assert (num >= 0);
		    }
		  reg_costs[j] = COSTS (costs, i)->cost[num];
		}
	    }
	}
    }
  /* Finally account for explicit moves between pseudos and hard regs;
     see process_bb_node_for_hard_reg_moves.  */
  if (optimize)
    ira_traverse_loop_tree (true, ira_loop_tree_root,
			    process_bb_node_for_hard_reg_moves, NULL);
}
2194
2195 \f
2196
2197 /* Function called once during compiler work. */
2198 void
2199 ira_init_costs_once (void)
2200 {
2201 int i;
2202
2203 init_cost = NULL;
2204 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2205 {
2206 op_costs[i] = NULL;
2207 this_op_costs[i] = NULL;
2208 }
2209 temp_costs = NULL;
2210 }
2211
2212 /* Free allocated temporary cost vectors. */
2213 void
2214 target_ira_int::free_ira_costs ()
2215 {
2216 int i;
2217
2218 free (x_init_cost);
2219 x_init_cost = NULL;
2220 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2221 {
2222 free (x_op_costs[i]);
2223 free (x_this_op_costs[i]);
2224 x_op_costs[i] = x_this_op_costs[i] = NULL;
2225 }
2226 free (x_temp_costs);
2227 x_temp_costs = NULL;
2228 }
2229
2230 /* This is called each time register related information is
2231 changed. */
2232 void
2233 ira_init_costs (void)
2234 {
2235 int i;
2236
2237 this_target_ira_int->free_ira_costs ();
2238 max_struct_costs_size
2239 = sizeof (struct costs) + sizeof (int) * (ira_important_classes_num - 1);
2240 /* Don't use ira_allocate because vectors live through several IRA
2241 calls. */
2242 init_cost = (struct costs *) xmalloc (max_struct_costs_size);
2243 init_cost->mem_cost = 1000000;
2244 for (i = 0; i < ira_important_classes_num; i++)
2245 init_cost->cost[i] = 1000000;
2246 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2247 {
2248 op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2249 this_op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2250 }
2251 temp_costs = (struct costs *) xmalloc (max_struct_costs_size);
2252 }
2253
2254 \f
2255
2256 /* Common initialization function for ira_costs and
2257 ira_set_pseudo_classes. */
2258 static void
2259 init_costs (void)
2260 {
2261 init_subregs_of_mode ();
2262 costs = (struct costs *) ira_allocate (max_struct_costs_size
2263 * cost_elements_num);
2264 pref_buffer = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2265 * cost_elements_num);
2266 regno_aclass = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2267 * max_reg_num ());
2268 regno_equiv_gains = (int *) ira_allocate (sizeof (int) * max_reg_num ());
2269 memset (regno_equiv_gains, 0, sizeof (int) * max_reg_num ());
2270 }
2271
/* Common finalization function for ira_costs and
   ira_set_pseudo_classes.  Releases everything init_costs allocated,
   in reverse order of allocation.  */
static void
finish_costs (void)
{
  finish_subregs_of_mode ();
  ira_free (regno_equiv_gains);
  ira_free (regno_aclass);
  ira_free (pref_buffer);
  ira_free (costs);
}
2283
/* Entry function which defines register class, memory and hard
   register costs for each allocno.  */
void
ira_costs (void)
{
  /* Work in allocno mode: one cost record per allocno.  */
  allocno_p = true;
  cost_elements_num = ira_allocnos_num;
  init_costs ();
  total_allocno_costs = (struct costs *) ira_allocate (max_struct_costs_size
						       * ira_allocnos_num);
  initiate_regno_cost_classes ();
  /* Account for register-elimination gains before classifying.  */
  calculate_elim_costs_all_insns ();
  find_costs_and_classes (ira_dump_file);
  setup_allocno_class_and_costs ();
  /* Tear down in reverse order of setup.  */
  finish_regno_cost_classes ();
  finish_costs ();
  ira_free (total_allocno_costs);
}
2302
/* Entry function which defines classes for pseudos.
   Set pseudo_classes_defined_p only if DEFINE_PSEUDO_CLASSES is true.  */
void
ira_set_pseudo_classes (bool define_pseudo_classes, FILE *dump_file)
{
  /* Work in pseudo mode: one cost record per pseudo register.  */
  allocno_p = false;
  internal_flag_ira_verbose = flag_ira_verbose;
  cost_elements_num = max_reg_num ();
  init_costs ();
  initiate_regno_cost_classes ();
  find_costs_and_classes (dump_file);
  finish_regno_cost_classes ();
  /* Only record that classes are defined when the caller asked for a
     definitive classification; a tentative run must not make later
     ira_pseudo_classes queries trust these results.  */
  if (define_pseudo_classes)
    pseudo_classes_defined_p = true;

  finish_costs ();
}
2320
2321 \f
2322
/* Change hard register costs for allocnos which lives through
   function calls.  This is called only when we found all intersected
   calls during building allocno live ranges.  */
void
ira_tune_allocno_costs (void)
{
  int j, n, regno;
  int cost, min_cost, *reg_costs;
  enum reg_class aclass, rclass;
  machine_mode mode;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_allocno_object_iterator oi;
  ira_object_t obj;
  bool skip_p;

  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      mode = ALLOCNO_MODE (a);
      n = ira_class_hard_regs_num[aclass];
      min_cost = INT_MAX;
      /* Penalize call-clobbered hard regs only when some crossed call
	 is not "cheap" (i.e. actually requires saving/restoring).  */
      if (ALLOCNO_CALLS_CROSSED_NUM (a)
	  != ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
	{
	  ira_allocate_and_set_costs
	    (&ALLOCNO_HARD_REG_COSTS (a), aclass,
	     ALLOCNO_CLASS_COST (a));
	  reg_costs = ALLOCNO_HARD_REG_COSTS (a);
	  for (j = n - 1; j >= 0; j--)
	    {
	      regno = ira_class_hard_regs[aclass][j];
	      /* Skip hard regs that conflict with any of the
		 allocno's objects; they can't be assigned anyway.  */
	      skip_p = false;
	      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
		{
		  if (ira_hard_reg_set_intersection_p (regno, mode,
						       OBJECT_CONFLICT_HARD_REGS
						       (obj)))
		    {
		      skip_p = true;
		      break;
		    }
		}
	      if (skip_p)
		continue;
	      rclass = REGNO_REG_CLASS (regno);
	      cost = 0;
	      /* Caller-save cost: a store before and a load after each
		 crossed call, weighted by call frequency.  */
	      if (ira_need_caller_save_p (a, regno))
		cost += (ALLOCNO_CALL_FREQ (a)
			 * (ira_memory_move_cost[mode][rclass][0]
			    + ira_memory_move_cost[mode][rclass][1]));
#ifdef IRA_HARD_REGNO_ADD_COST_MULTIPLIER
	      /* Optional target-specific extra penalty per regno.  */
	      cost += ((ira_memory_move_cost[mode][rclass][0]
			+ ira_memory_move_cost[mode][rclass][1])
		       * ALLOCNO_FREQ (a)
		       * IRA_HARD_REGNO_ADD_COST_MULTIPLIER (regno) / 2);
#endif
	      /* Saturate instead of overflowing signed int.  */
	      if (INT_MAX - cost < reg_costs[j])
		reg_costs[j] = INT_MAX;
	      else
		reg_costs[j] += cost;
	      if (min_cost > reg_costs[j])
		min_cost = reg_costs[j];
	    }
	}
      /* Keep the class cost equal to the cheapest hard-reg cost seen.  */
      if (min_cost != INT_MAX)
	ALLOCNO_CLASS_COST (a) = min_cost;

      /* Some targets allow pseudos to be allocated to unaligned sequences
	 of hard registers.  However, selecting an unaligned sequence can
	 unnecessarily restrict later allocations.  So increase the cost of
	 unaligned hard regs to encourage the use of aligned hard regs.  */
      {
	const int nregs = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];

	if (nregs > 1)
	  {
	    ira_allocate_and_set_costs
	      (&ALLOCNO_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a));
	    reg_costs = ALLOCNO_HARD_REG_COSTS (a);
	    for (j = n - 1; j >= 0; j--)
	      {
		regno = ira_non_ordered_class_hard_regs[aclass][j];
		/* A sequence starting at a regno not divisible by
		   NREGS is unaligned; make it slightly dearer.  */
		if ((regno % nregs) != 0)
		  {
		    int index = ira_class_hard_reg_index[aclass][regno];
		    ira_assert (index != -1);
		    reg_costs[index] += ALLOCNO_FREQ (a);
		  }
	      }
	  }
      }
    }
}
2419
2420 /* Add COST to the estimated gain for eliminating REGNO with its
2421 equivalence. If COST is zero, record that no such elimination is
2422 possible. */
2423
2424 void
2425 ira_adjust_equiv_reg_cost (unsigned regno, int cost)
2426 {
2427 if (cost == 0)
2428 regno_equiv_gains[regno] = 0;
2429 else
2430 regno_equiv_gains[regno] += cost;
2431 }
2432
/* Release this file's target-specific temporary cost vectors; called
   when the cost machinery for the current target is torn down.  */
void
ira_costs_c_finalize (void)
{
  this_target_ira_int->free_ira_costs ();
}