gcc/ira-costs.cc
1 /* IRA hard register and memory cost calculation for allocnos or pseudos.
2 Copyright (C) 2006-2022 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "tm_p.h"
31 #include "insn-config.h"
32 #include "regs.h"
33 #include "ira.h"
34 #include "ira-int.h"
35 #include "addresses.h"
36 #include "reload.h"
37
38 /* This flag is set up every time we calculate pseudo register
39 classes through the function ira_set_pseudo_classes. */
40 static bool pseudo_classes_defined_p = false;
41
42 /* TRUE if we work with allocnos. Otherwise we work with pseudos. */
43 static bool allocno_p;
44
45 /* Number of elements in array `costs'. */
46 static int cost_elements_num;
47
48 /* The `costs' struct records the cost of using hard registers of each
49 class considered for the calculation and of using memory for each
50 allocno or pseudo. */
51 struct costs
52 {
53 int mem_cost;
54 /* Costs for register classes start here. We process only some
55 allocno classes. */
56 int cost[1];
57 };
58
59 #define max_struct_costs_size \
60 (this_target_ira_int->x_max_struct_costs_size)
61 #define init_cost \
62 (this_target_ira_int->x_init_cost)
63 #define temp_costs \
64 (this_target_ira_int->x_temp_costs)
65 #define op_costs \
66 (this_target_ira_int->x_op_costs)
67 #define this_op_costs \
68 (this_target_ira_int->x_this_op_costs)
69
70 /* Costs of each class for each allocno or pseudo. */
71 static struct costs *costs;
72
73 /* Accumulated costs of each class for each allocno. */
74 static struct costs *total_allocno_costs;
75
76 /* It is the current size of struct costs. */
77 static size_t struct_costs_size;
78
79 /* Return pointer to structure containing costs of allocno or pseudo
80 with given NUM in array ARR. */
81 #define COSTS(arr, num) \
82 ((struct costs *) ((char *) (arr) + (num) * struct_costs_size))
83
84 /* Return index in COSTS when processing reg with REGNO. */
85 #define COST_INDEX(regno) (allocno_p \
86 ? ALLOCNO_NUM (ira_curr_regno_allocno_map[regno]) \
87 : (int) regno)
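/* Editorial sketch (illustrative only, not part of the build): `cost' is a
   trailing array, so each record really occupies struct_costs_size
   = sizeof (struct costs) + (N - 1) * sizeof (int) bytes for N cost
   classes, and COSTS therefore indexes by byte offset rather than by plain
   pointer arithmetic on struct costs.  A typical access pattern seen later
   in this file is:

     struct costs *p = COSTS (costs, COST_INDEX (regno));
     p->mem_cost += add_cost;     // memory slot of this allocno/pseudo
     p->cost[k] += add_cost;      // slot of the k-th cost class

   where regno, add_cost and k stand in for the local variables of the
   calling functions.  */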
88
89 /* Record register class preferences of each allocno or pseudo. A null
90 value means no preferences; this happens on the 1st iteration of the
91 cost calculation. */
92 static enum reg_class *pref;
93
94 /* Allocated buffers for pref. */
95 static enum reg_class *pref_buffer;
96
97 /* Record allocno class of each allocno with the same regno. */
98 static enum reg_class *regno_aclass;
99
100 /* Record cost gains for not allocating a register with an invariant
101 equivalence. */
102 static int *regno_equiv_gains;
103
104 /* Execution frequency of the current insn. */
105 static int frequency;
106
107 \f
108
109 /* Info about reg classes whose costs are calculated for a pseudo. */
110 struct cost_classes
111 {
112 /* Number of the cost classes in the subsequent array. */
113 int num;
114 /* Container of the cost classes. */
115 enum reg_class classes[N_REG_CLASSES];
116 /* Map reg class -> index of the reg class in the previous array.
117 -1 if it is not a cost class. */
118 int index[N_REG_CLASSES];
119 /* Map hard regno -> index of the first class in array CLASSES containing
120 the hard regno, -1 otherwise. */
121 int hard_regno_index[FIRST_PSEUDO_REGISTER];
122 };
123
124 /* Types of pointers to the structure above. */
125 typedef struct cost_classes *cost_classes_t;
126 typedef const struct cost_classes *const_cost_classes_t;
127
128 /* Info about cost classes for each pseudo. */
129 static cost_classes_t *regno_cost_classes;
130
131 /* Helper for cost_classes hashing. */
132
133 struct cost_classes_hasher : pointer_hash <cost_classes>
134 {
135 static inline hashval_t hash (const cost_classes *);
136 static inline bool equal (const cost_classes *, const cost_classes *);
137 static inline void remove (cost_classes *);
138 };
139
140 /* Returns hash value for cost classes info HV. */
141 inline hashval_t
142 cost_classes_hasher::hash (const cost_classes *hv)
143 {
144 return iterative_hash (&hv->classes, sizeof (enum reg_class) * hv->num, 0);
145 }
146
147 /* Compares cost classes info HV1 and HV2. */
148 inline bool
149 cost_classes_hasher::equal (const cost_classes *hv1, const cost_classes *hv2)
150 {
151 return (hv1->num == hv2->num
152 && memcmp (hv1->classes, hv2->classes,
153 sizeof (enum reg_class) * hv1->num) == 0);
154 }
155
156 /* Delete cost classes info V from the hash table. */
157 inline void
158 cost_classes_hasher::remove (cost_classes *v)
159 {
160 ira_free (v);
161 }
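/* Editorial note (illustrative only): hash and equal above look only at
   NUM and the CLASSES array; INDEX and HARD_REGNO_INDEX are recomputed by
   complete_cost_classes, so they are necessarily identical for keys that
   compare equal.  A hypothetical lookup therefore only needs to fill in
   the first two members:

     struct cost_classes key;
     key.num = 1;
     key.classes[0] = GENERAL_REGS;   // hypothetical contents
     cost_classes **slot = cost_classes_htab->find_slot (&key, INSERT);
*/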
162
163 /* Hash table of unique cost classes. */
164 static hash_table<cost_classes_hasher> *cost_classes_htab;
165
166 /* Map allocno class -> cost classes for pseudo of given allocno
167 class. */
168 static cost_classes_t cost_classes_aclass_cache[N_REG_CLASSES];
169
170 /* Map mode -> cost classes for pseudo of given mode. */
171 static cost_classes_t cost_classes_mode_cache[MAX_MACHINE_MODE];
172
173 /* Cost classes that include all classes in ira_important_classes. */
174 static cost_classes all_cost_classes;
175
176 /* Use the array of classes in CLASSES_PTR to fill out the rest of
177 the structure. */
178 static void
179 complete_cost_classes (cost_classes_t classes_ptr)
180 {
181 for (int i = 0; i < N_REG_CLASSES; i++)
182 classes_ptr->index[i] = -1;
183 for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
184 classes_ptr->hard_regno_index[i] = -1;
185 for (int i = 0; i < classes_ptr->num; i++)
186 {
187 enum reg_class cl = classes_ptr->classes[i];
188 classes_ptr->index[cl] = i;
189 for (int j = ira_class_hard_regs_num[cl] - 1; j >= 0; j--)
190 {
191 unsigned int hard_regno = ira_class_hard_regs[cl][j];
192 if (classes_ptr->hard_regno_index[hard_regno] < 0)
193 classes_ptr->hard_regno_index[hard_regno] = i;
194 }
195 }
196 }
197
198 /* Initialize info about the cost classes for each pseudo. */
199 static void
200 initiate_regno_cost_classes (void)
201 {
202 int size = sizeof (cost_classes_t) * max_reg_num ();
203
204 regno_cost_classes = (cost_classes_t *) ira_allocate (size);
205 memset (regno_cost_classes, 0, size);
206 memset (cost_classes_aclass_cache, 0,
207 sizeof (cost_classes_t) * N_REG_CLASSES);
208 memset (cost_classes_mode_cache, 0,
209 sizeof (cost_classes_t) * MAX_MACHINE_MODE);
210 cost_classes_htab = new hash_table<cost_classes_hasher> (200);
211 all_cost_classes.num = ira_important_classes_num;
212 for (int i = 0; i < ira_important_classes_num; i++)
213 all_cost_classes.classes[i] = ira_important_classes[i];
214 complete_cost_classes (&all_cost_classes);
215 }
216
217 /* Create new cost classes from cost classes FROM and set up members
218 index and hard_regno_index. Return the new classes. The function
219 implements some common code of two functions
220 setup_regno_cost_classes_by_aclass and
221 setup_regno_cost_classes_by_mode. */
222 static cost_classes_t
223 setup_cost_classes (cost_classes_t from)
224 {
225 cost_classes_t classes_ptr;
226
227 classes_ptr = (cost_classes_t) ira_allocate (sizeof (struct cost_classes));
228 classes_ptr->num = from->num;
229 for (int i = 0; i < from->num; i++)
230 classes_ptr->classes[i] = from->classes[i];
231 complete_cost_classes (classes_ptr);
232 return classes_ptr;
233 }
234
235 /* Return a version of FULL that only considers registers in REGS that are
236 valid for mode MODE. Both FULL and the returned class are globally
237 allocated. */
238 static cost_classes_t
239 restrict_cost_classes (cost_classes_t full, machine_mode mode,
240 const_hard_reg_set regs)
241 {
242 static struct cost_classes narrow;
243 int map[N_REG_CLASSES];
244 narrow.num = 0;
245 for (int i = 0; i < full->num; i++)
246 {
247 /* Assume that we'll drop the class. */
248 map[i] = -1;
249
250 /* Ignore classes that are too small for the mode. */
251 enum reg_class cl = full->classes[i];
252 if (!contains_reg_of_mode[cl][mode])
253 continue;
254
255 /* Calculate the set of registers in CL that belong to REGS and
256 are valid for MODE. */
257 HARD_REG_SET valid_for_cl = reg_class_contents[cl] & regs;
258 valid_for_cl &= ~(ira_prohibited_class_mode_regs[cl][mode]
259 | ira_no_alloc_regs);
260 if (hard_reg_set_empty_p (valid_for_cl))
261 continue;
262
263 /* Don't use this class if the set of valid registers is a subset
264 of an existing class. For example, suppose we have two classes
265 GR_REGS and FR_REGS and a union class GR_AND_FR_REGS. Suppose
266 that the mode changes allowed by FR_REGS are not as general as
267 the mode changes allowed by GR_REGS.
268
269 In this situation, the mode changes for GR_AND_FR_REGS could
270 either be seen as the union or the intersection of the mode
271 changes allowed by the two subclasses. The justification for
272 the union-based definition would be that, if you want a mode
273 change that's only allowed by GR_REGS, you can pick a register
274 from the GR_REGS subclass. The justification for the
275 intersection-based definition would be that every register
276 from the class would allow the mode change.
277
278 However, if we have a register that needs to be in GR_REGS,
279 using GR_AND_FR_REGS with the intersection-based definition
280 would be too pessimistic, since it would bring in restrictions
281 that only apply to FR_REGS. Conversely, if we have a register
282 that needs to be in FR_REGS, using GR_AND_FR_REGS with the
283 union-based definition would lose the extra restrictions
284 placed on FR_REGS. GR_AND_FR_REGS is therefore only useful
285 for cases where GR_REGS and FR_REGS are both valid. */
286 int pos;
287 for (pos = 0; pos < narrow.num; ++pos)
288 {
289 enum reg_class cl2 = narrow.classes[pos];
290 if (hard_reg_set_subset_p (valid_for_cl, reg_class_contents[cl2]))
291 break;
292 }
293 map[i] = pos;
294 if (pos == narrow.num)
295 {
296 /* If several classes are equivalent, prefer to use the one
297 that was chosen as the allocno class. */
298 enum reg_class cl2 = ira_allocno_class_translate[cl];
299 if (ira_class_hard_regs_num[cl] == ira_class_hard_regs_num[cl2])
300 cl = cl2;
301 narrow.classes[narrow.num++] = cl;
302 }
303 }
304 if (narrow.num == full->num)
305 return full;
306
307 cost_classes **slot = cost_classes_htab->find_slot (&narrow, INSERT);
308 if (*slot == NULL)
309 {
310 cost_classes_t classes = setup_cost_classes (&narrow);
311 /* Map equivalent classes to the representative that we chose above. */
312 for (int i = 0; i < ira_important_classes_num; i++)
313 {
314 enum reg_class cl = ira_important_classes[i];
315 int index = full->index[cl];
316 if (index >= 0)
317 classes->index[cl] = map[index];
318 }
319 *slot = classes;
320 }
321 return *slot;
322 }
323
324 /* Set up cost classes for pseudo REGNO whose allocno class is ACLASS.
325 This function is used when we already know an initial approximation
326 of the pseudo's allocno class, e.g. on the second iteration
327 of class cost calculation or after class cost calculation in
328 register-pressure sensitive insn scheduling or register-pressure
329 sensitive loop-invariant motion. */
330 static void
331 setup_regno_cost_classes_by_aclass (int regno, enum reg_class aclass)
332 {
333 static struct cost_classes classes;
334 cost_classes_t classes_ptr;
335 enum reg_class cl;
336 int i;
337 cost_classes **slot;
338 HARD_REG_SET temp, temp2;
339 bool exclude_p;
340
341 if ((classes_ptr = cost_classes_aclass_cache[aclass]) == NULL)
342 {
343 temp = reg_class_contents[aclass] & ~ira_no_alloc_regs;
344 /* We exclude classes from consideration which are subsets of
345 ACLASS only if ACLASS is a uniform class. */
346 exclude_p = ira_uniform_class_p[aclass];
347 classes.num = 0;
348 for (i = 0; i < ira_important_classes_num; i++)
349 {
350 cl = ira_important_classes[i];
351 if (exclude_p)
352 {
353 /* Exclude non-uniform classes which are subsets of
354 ACLASS. */
355 temp2 = reg_class_contents[cl] & ~ira_no_alloc_regs;
356 if (hard_reg_set_subset_p (temp2, temp) && cl != aclass)
357 continue;
358 }
359 classes.classes[classes.num++] = cl;
360 }
361 slot = cost_classes_htab->find_slot (&classes, INSERT);
362 if (*slot == NULL)
363 {
364 classes_ptr = setup_cost_classes (&classes);
365 *slot = classes_ptr;
366 }
367 classes_ptr = cost_classes_aclass_cache[aclass] = (cost_classes_t) *slot;
368 }
369 if (regno_reg_rtx[regno] != NULL_RTX)
370 {
371 /* Restrict the classes to those that are valid for REGNO's mode
372 (which might for example exclude singleton classes if the mode
373 requires two registers). Also restrict the classes to those that
374 are valid for subregs of REGNO. */
375 const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno);
376 if (!valid_regs)
377 valid_regs = &reg_class_contents[ALL_REGS];
378 classes_ptr = restrict_cost_classes (classes_ptr,
379 PSEUDO_REGNO_MODE (regno),
380 *valid_regs);
381 }
382 regno_cost_classes[regno] = classes_ptr;
383 }
384
385 /* Set up cost classes for pseudo REGNO with MODE. Using MODE can
386 decrease the number of cost classes for the pseudo, if hard registers
387 of some important classes cannot hold a value of MODE. In that case
388 the pseudo cannot get a hard register of those important classes, and
389 cost calculation for such important classes would only waste CPU
390 time. */
391 static void
392 setup_regno_cost_classes_by_mode (int regno, machine_mode mode)
393 {
394 if (const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno))
395 regno_cost_classes[regno] = restrict_cost_classes (&all_cost_classes,
396 mode, *valid_regs);
397 else
398 {
399 if (cost_classes_mode_cache[mode] == NULL)
400 cost_classes_mode_cache[mode]
401 = restrict_cost_classes (&all_cost_classes, mode,
402 reg_class_contents[ALL_REGS]);
403 regno_cost_classes[regno] = cost_classes_mode_cache[mode];
404 }
405 }
406
407 /* Finalize info about the cost classes for each pseudo. */
408 static void
409 finish_regno_cost_classes (void)
410 {
411 ira_free (regno_cost_classes);
412 delete cost_classes_htab;
413 cost_classes_htab = NULL;
414 }
415
416 \f
417
418 /* Compute the cost of loading X into (if TO_P is TRUE) or from (if
419 TO_P is FALSE) a register of class RCLASS in mode MODE. X must not
420 be a pseudo register. */
421 static int
422 copy_cost (rtx x, machine_mode mode, reg_class_t rclass, bool to_p,
423 secondary_reload_info *prev_sri)
424 {
425 secondary_reload_info sri;
426 reg_class_t secondary_class = NO_REGS;
427
428 /* If X is a SCRATCH, there is actually nothing to move since we are
429 assuming optimal allocation. */
430 if (GET_CODE (x) == SCRATCH)
431 return 0;
432
433 /* Get the class we will actually use for a reload. */
434 rclass = targetm.preferred_reload_class (x, rclass);
435
436 /* If we need a secondary reload for an intermediate, the cost is
437 that of loading the input into the intermediate register plus the
438 cost of copying it. */
439 sri.prev_sri = prev_sri;
440 sri.extra_cost = 0;
441 /* PR 68770: Secondary reload might examine the t_icode field. */
442 sri.t_icode = CODE_FOR_nothing;
443
444 secondary_class = targetm.secondary_reload (to_p, x, rclass, mode, &sri);
445
446 if (secondary_class != NO_REGS)
447 {
448 ira_init_register_move_cost_if_necessary (mode);
449 return (ira_register_move_cost[mode][(int) secondary_class][(int) rclass]
450 + sri.extra_cost
451 + copy_cost (x, mode, secondary_class, to_p, &sri));
452 }
453
454 /* For memory, use the memory move cost, for (hard) registers, use
455 the cost to move between the register classes, and use
456 COSTS_N_INSNS (1) for everything else (constants). */
457 if (MEM_P (x) || rclass == NO_REGS)
458 return sri.extra_cost
459 + ira_memory_move_cost[mode][(int) rclass][to_p != 0];
460 else if (REG_P (x))
461 {
462 reg_class_t x_class = REGNO_REG_CLASS (REGNO (x));
463
464 ira_init_register_move_cost_if_necessary (mode);
465 return (sri.extra_cost
466 + ira_register_move_cost[mode][(int) x_class][(int) rclass]);
467 }
468 else
469 /* If this is a constant, we may eventually want to call rtx_cost
470 here. */
471 return sri.extra_cost + COSTS_N_INSNS (1);
472 }
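/* Editorial sketch (illustrative only): when a secondary reload class is
   needed, the cost above composes recursively, roughly

     copy_cost (x, mode, rclass)
       = ira_register_move_cost[mode][secondary][rclass]
         + sri.extra_cost
         + copy_cost (x, mode, secondary)

   and the recursion bottoms out in the memory/register/constant cases once
   no further secondary reload is required.  */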
473
474 \f
475
476 /* Record the cost of using memory or hard registers of various
477 classes for the operands in INSN.
478
479 N_ALTS is the number of alternatives.
480 N_OPS is the number of operands.
481 OPS is an array of the operands.
482 MODES are the modes of the operands, in case any are VOIDmode.
483 CONSTRAINTS are the constraints to use for the operands. This array
484 is modified by this procedure.
485
486 This procedure works alternative by alternative. For each
487 alternative we assume that we will be able to allocate all allocnos
488 to their ideal register class and calculate the cost of using that
489 alternative. Then we compute, for each operand that is a
490 pseudo-register, the cost of having the allocno allocated to each
491 register class and using it in that alternative. To this cost is
492 added the cost of the alternative.
493
494 The cost of each class for this insn is its lowest cost among all
495 the alternatives. */
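/* Editorial summary (illustrative only): for a pseudo operand i and cost
   class k, the update loop at the end of the function below effectively
   computes

     op_costs[i]->cost[k]
       = min over enabled alternatives of
           (this_op_costs[i]->cost[k] + alt_cost * frequency) * scale

   with scale == 2 for in/out operands, and similarly for mem_cost.  */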
496 static void
497 record_reg_classes (int n_alts, int n_ops, rtx *ops,
498 machine_mode *modes, const char **constraints,
499 rtx_insn *insn, enum reg_class *pref)
500 {
501 int alt;
502 int i, j, k;
503 int insn_allows_mem[MAX_RECOG_OPERANDS];
504 move_table *move_in_cost, *move_out_cost;
505 short (*mem_cost)[2];
506
507 for (i = 0; i < n_ops; i++)
508 insn_allows_mem[i] = 0;
509
510 /* Process each alternative, each time minimizing an operand's cost
511 with the cost for each operand in that alternative. */
512 alternative_mask preferred = get_preferred_alternatives (insn);
513 for (alt = 0; alt < n_alts; alt++)
514 {
515 enum reg_class classes[MAX_RECOG_OPERANDS];
516 int allows_mem[MAX_RECOG_OPERANDS];
517 enum reg_class rclass;
518 int alt_fail = 0;
519 int alt_cost = 0, op_cost_add;
520
521 if (!TEST_BIT (preferred, alt))
522 {
523 for (i = 0; i < recog_data.n_operands; i++)
524 constraints[i] = skip_alternative (constraints[i]);
525
526 continue;
527 }
528
529 for (i = 0; i < n_ops; i++)
530 {
531 unsigned char c;
532 const char *p = constraints[i];
533 rtx op = ops[i];
534 machine_mode mode = modes[i];
535 int allows_addr = 0;
536 int win = 0;
537
538 /* Initially show we know nothing about the register class. */
539 classes[i] = NO_REGS;
540 allows_mem[i] = 0;
541
542 /* If this operand has no constraints at all, we can
543 conclude nothing about it since anything is valid. */
544 if (*p == 0)
545 {
546 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
547 memset (this_op_costs[i], 0, struct_costs_size);
548 continue;
549 }
550
551 /* If this alternative is only relevant when this operand
552 matches a previous operand, we do different things
553 depending on whether this operand is an allocno-reg or not.
554 We must process any modifiers for the operand before we
555 can make this test. */
556 while (*p == '%' || *p == '=' || *p == '+' || *p == '&')
557 p++;
558
559 if (p[0] >= '0' && p[0] <= '0' + i)
560 {
561 /* Copy class and whether memory is allowed from the
562 matching alternative. Then perform any needed cost
563 computations and/or adjustments. */
564 j = p[0] - '0';
565 classes[i] = classes[j];
566 allows_mem[i] = allows_mem[j];
567 if (allows_mem[i])
568 insn_allows_mem[i] = 1;
569
570 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
571 {
572 /* If this matches the other operand, we have no
573 added cost and we win. */
574 if (rtx_equal_p (ops[j], op))
575 win = 1;
576 /* If we can put the other operand into a register,
577 add to the cost of this alternative the cost to
578 copy this operand to the register used for the
579 other operand. */
580 else if (classes[j] != NO_REGS)
581 {
582 alt_cost += copy_cost (op, mode, classes[j], 1, NULL);
583 win = 1;
584 }
585 }
586 else if (! REG_P (ops[j])
587 || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER)
588 {
589 /* This op is an allocno but the one it matches is
590 not. */
591
592 /* If we can't put the other operand into a
593 register, this alternative can't be used. */
594
595 if (classes[j] == NO_REGS)
596 alt_fail = 1;
597 /* Otherwise, add to the cost of this alternative
598 the cost to copy the other operand to the hard
599 register used for this operand. */
600 else
601 alt_cost += copy_cost (ops[j], mode, classes[j], 1, NULL);
602 }
603 else
604 {
605 /* The costs of this operand are not the same as the
606 other operand since move costs are not symmetric.
607 Moreover, if we cannot tie them, this alternative
608 needs to do a copy, which is one insn. */
609 struct costs *pp = this_op_costs[i];
610 int *pp_costs = pp->cost;
611 cost_classes_t cost_classes_ptr
612 = regno_cost_classes[REGNO (op)];
613 enum reg_class *cost_classes = cost_classes_ptr->classes;
614 bool in_p = recog_data.operand_type[i] != OP_OUT;
615 bool out_p = recog_data.operand_type[i] != OP_IN;
616 enum reg_class op_class = classes[i];
617
618 ira_init_register_move_cost_if_necessary (mode);
619 if (! in_p)
620 {
621 ira_assert (out_p);
622 if (op_class == NO_REGS)
623 {
624 mem_cost = ira_memory_move_cost[mode];
625 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
626 {
627 rclass = cost_classes[k];
628 pp_costs[k] = mem_cost[rclass][0] * frequency;
629 }
630 }
631 else
632 {
633 move_out_cost = ira_may_move_out_cost[mode];
634 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
635 {
636 rclass = cost_classes[k];
637 pp_costs[k]
638 = move_out_cost[op_class][rclass] * frequency;
639 }
640 }
641 }
642 else if (! out_p)
643 {
644 ira_assert (in_p);
645 if (op_class == NO_REGS)
646 {
647 mem_cost = ira_memory_move_cost[mode];
648 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
649 {
650 rclass = cost_classes[k];
651 pp_costs[k] = mem_cost[rclass][1] * frequency;
652 }
653 }
654 else
655 {
656 move_in_cost = ira_may_move_in_cost[mode];
657 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
658 {
659 rclass = cost_classes[k];
660 pp_costs[k]
661 = move_in_cost[rclass][op_class] * frequency;
662 }
663 }
664 }
665 else
666 {
667 if (op_class == NO_REGS)
668 {
669 mem_cost = ira_memory_move_cost[mode];
670 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
671 {
672 rclass = cost_classes[k];
673 pp_costs[k] = ((mem_cost[rclass][0]
674 + mem_cost[rclass][1])
675 * frequency);
676 }
677 }
678 else
679 {
680 move_in_cost = ira_may_move_in_cost[mode];
681 move_out_cost = ira_may_move_out_cost[mode];
682 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
683 {
684 rclass = cost_classes[k];
685 pp_costs[k] = ((move_in_cost[rclass][op_class]
686 + move_out_cost[op_class][rclass])
687 * frequency);
688 }
689 }
690 }
691
692 /* If the alternative actually allows memory, make
693 things a bit cheaper since we won't need an extra
694 insn to load it. */
695 pp->mem_cost
696 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
697 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
698 - allows_mem[i]) * frequency;
699
700 /* If we have assigned a class to this allocno in
701 our first pass, add a cost to this alternative
702 corresponding to what we would add if this
703 allocno were not in the appropriate class. */
704 if (pref)
705 {
706 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
707
708 if (pref_class == NO_REGS)
709 alt_cost
710 += ((out_p
711 ? ira_memory_move_cost[mode][op_class][0] : 0)
712 + (in_p
713 ? ira_memory_move_cost[mode][op_class][1]
714 : 0));
715 else if (ira_reg_class_intersect
716 [pref_class][op_class] == NO_REGS)
717 alt_cost
718 += ira_register_move_cost[mode][pref_class][op_class];
719 }
720 if (REGNO (ops[i]) != REGNO (ops[j])
721 && ! find_reg_note (insn, REG_DEAD, op))
722 alt_cost += 2;
723
724 p++;
725 }
726 }
727
728 /* Scan all the constraint letters. See if the operand
729 matches any of the constraints. Collect the valid
730 register classes and see if this operand accepts
731 memory. */
732 while ((c = *p))
733 {
734 switch (c)
735 {
736 case '*':
737 /* Ignore the next letter for this pass. */
738 c = *++p;
739 break;
740
741 case '^':
742 alt_cost += 2;
743 break;
744
745 case '?':
746 alt_cost += 2;
747 break;
748
749 case 'g':
750 if (MEM_P (op)
751 || (CONSTANT_P (op)
752 && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))))
753 win = 1;
754 insn_allows_mem[i] = allows_mem[i] = 1;
755 classes[i] = ira_reg_class_subunion[classes[i]][GENERAL_REGS];
756 break;
757
758 default:
759 enum constraint_num cn = lookup_constraint (p);
760 enum reg_class cl;
761 switch (get_constraint_type (cn))
762 {
763 case CT_REGISTER:
764 cl = reg_class_for_constraint (cn);
765 if (cl != NO_REGS)
766 classes[i] = ira_reg_class_subunion[classes[i]][cl];
767 break;
768
769 case CT_CONST_INT:
770 if (CONST_INT_P (op)
771 && insn_const_int_ok_for_constraint (INTVAL (op), cn))
772 win = 1;
773 break;
774
775 case CT_MEMORY:
776 case CT_RELAXED_MEMORY:
777 /* Every MEM can be reloaded to fit. */
778 insn_allows_mem[i] = allows_mem[i] = 1;
779 if (MEM_P (op))
780 win = 1;
781 break;
782
783 case CT_SPECIAL_MEMORY:
784 insn_allows_mem[i] = allows_mem[i] = 1;
785 if (MEM_P (extract_mem_from_operand (op))
786 && constraint_satisfied_p (op, cn))
787 win = 1;
788 break;
789
790 case CT_ADDRESS:
791 /* Every address can be reloaded to fit. */
792 allows_addr = 1;
793 if (address_operand (op, GET_MODE (op))
794 || constraint_satisfied_p (op, cn))
795 win = 1;
796 /* We know this operand is an address, so we
797 want it to be allocated to a hard register
798 that can be the base of an address,
799 i.e. BASE_REG_CLASS. */
800 classes[i]
801 = ira_reg_class_subunion[classes[i]]
802 [base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
803 ADDRESS, SCRATCH)];
804 break;
805
806 case CT_FIXED_FORM:
807 if (constraint_satisfied_p (op, cn))
808 win = 1;
809 break;
810 }
811 break;
812 }
813 p += CONSTRAINT_LEN (c, p);
814 if (c == ',')
815 break;
816 }
817
818 constraints[i] = p;
819
820 if (alt_fail)
821 break;
822
823 /* How we account for this operand now depends on whether it
824 is a pseudo register or not. If it is, we first check if
825 any register classes are valid. If not, we ignore this
826 alternative, since we want to assume that all allocnos get
827 allocated for register preferencing. If some register
828 class is valid, compute the costs of moving the allocno
829 into that class. */
830 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
831 {
832 if (classes[i] == NO_REGS && ! allows_mem[i])
833 {
834 /* We must always fail if the operand is a REG, but
835 we did not find a suitable class and memory is
836 not allowed.
837
838 Otherwise we may perform an uninitialized read
839 from this_op_costs after the `continue' statement
840 below. */
841 alt_fail = 1;
842 }
843 else
844 {
845 unsigned int regno = REGNO (op);
846 struct costs *pp = this_op_costs[i];
847 int *pp_costs = pp->cost;
848 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
849 enum reg_class *cost_classes = cost_classes_ptr->classes;
850 bool in_p = recog_data.operand_type[i] != OP_OUT;
851 bool out_p = recog_data.operand_type[i] != OP_IN;
852 enum reg_class op_class = classes[i];
853
854 ira_init_register_move_cost_if_necessary (mode);
855 if (! in_p)
856 {
857 ira_assert (out_p);
858 if (op_class == NO_REGS)
859 {
860 mem_cost = ira_memory_move_cost[mode];
861 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
862 {
863 rclass = cost_classes[k];
864 pp_costs[k] = mem_cost[rclass][0] * frequency;
865 }
866 }
867 else
868 {
869 move_out_cost = ira_may_move_out_cost[mode];
870 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
871 {
872 rclass = cost_classes[k];
873 pp_costs[k]
874 = move_out_cost[op_class][rclass] * frequency;
875 }
876 }
877 }
878 else if (! out_p)
879 {
880 ira_assert (in_p);
881 if (op_class == NO_REGS)
882 {
883 mem_cost = ira_memory_move_cost[mode];
884 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
885 {
886 rclass = cost_classes[k];
887 pp_costs[k] = mem_cost[rclass][1] * frequency;
888 }
889 }
890 else
891 {
892 move_in_cost = ira_may_move_in_cost[mode];
893 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
894 {
895 rclass = cost_classes[k];
896 pp_costs[k]
897 = move_in_cost[rclass][op_class] * frequency;
898 }
899 }
900 }
901 else
902 {
903 if (op_class == NO_REGS)
904 {
905 mem_cost = ira_memory_move_cost[mode];
906 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
907 {
908 rclass = cost_classes[k];
909 pp_costs[k] = ((mem_cost[rclass][0]
910 + mem_cost[rclass][1])
911 * frequency);
912 }
913 }
914 else
915 {
916 move_in_cost = ira_may_move_in_cost[mode];
917 move_out_cost = ira_may_move_out_cost[mode];
918 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
919 {
920 rclass = cost_classes[k];
921 pp_costs[k] = ((move_in_cost[rclass][op_class]
922 + move_out_cost[op_class][rclass])
923 * frequency);
924 }
925 }
926 }
927
928 if (op_class == NO_REGS)
929 /* Although we don't need an insn to reload from
930 memory, accessing memory is usually still more
931 expensive than a register. */
932 pp->mem_cost = frequency;
933 else
934 /* If the alternative actually allows memory, make
935 things a bit cheaper since we won't need an
936 extra insn to load it. */
937 pp->mem_cost
938 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
939 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
940 - allows_mem[i]) * frequency;
941 /* If we have assigned a class to this allocno in
942 our first pass, add a cost to this alternative
943 corresponding to what we would add if this
944 allocno were not in the appropriate class. */
945 if (pref)
946 {
947 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
948
949 if (pref_class == NO_REGS)
950 {
951 if (op_class != NO_REGS)
952 alt_cost
953 += ((out_p
954 ? ira_memory_move_cost[mode][op_class][0]
955 : 0)
956 + (in_p
957 ? ira_memory_move_cost[mode][op_class][1]
958 : 0));
959 }
960 else if (op_class == NO_REGS)
961 alt_cost
962 += ((out_p
963 ? ira_memory_move_cost[mode][pref_class][1]
964 : 0)
965 + (in_p
966 ? ira_memory_move_cost[mode][pref_class][0]
967 : 0));
968 else if (ira_reg_class_intersect[pref_class][op_class]
969 == NO_REGS)
970 alt_cost += (ira_register_move_cost
971 [mode][pref_class][op_class]);
972 }
973 }
974 }
975
976 /* Otherwise, if this alternative wins, either because we
977 have already determined that or if we have a hard
978 register of the proper class, there is no cost for this
979 alternative. */
980 else if (win || (REG_P (op)
981 && reg_fits_class_p (op, classes[i],
982 0, GET_MODE (op))))
983 ;
984
985 /* If registers are valid, the cost of this alternative
986 includes copying the object to and/or from a
987 register. */
988 else if (classes[i] != NO_REGS)
989 {
990 if (recog_data.operand_type[i] != OP_OUT)
991 alt_cost += copy_cost (op, mode, classes[i], 1, NULL);
992
993 if (recog_data.operand_type[i] != OP_IN)
994 alt_cost += copy_cost (op, mode, classes[i], 0, NULL);
995 }
996 /* The only other way this alternative can be used is if
997 this is a constant that could be placed into memory. */
998 else if (CONSTANT_P (op) && (allows_addr || allows_mem[i]))
999 alt_cost += ira_memory_move_cost[mode][classes[i]][1];
1000 else
1001 alt_fail = 1;
1002
1003 if (alt_fail)
1004 break;
1005 }
1006
1007 if (alt_fail)
1008 {
1009 /* The loop above might have exited early once the failure
1010 was seen. Skip over the constraints for the remaining
1011 operands. */
1012 i += 1;
1013 for (; i < n_ops; ++i)
1014 constraints[i] = skip_alternative (constraints[i]);
1015 continue;
1016 }
1017
1018 op_cost_add = alt_cost * frequency;
1019 /* Finally, update the costs with the information we've
1020 calculated about this alternative. */
1021 for (i = 0; i < n_ops; i++)
1022 if (REG_P (ops[i]) && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
1023 {
1024 struct costs *pp = op_costs[i], *qq = this_op_costs[i];
1025 int *pp_costs = pp->cost, *qq_costs = qq->cost;
1026 int scale = 1 + (recog_data.operand_type[i] == OP_INOUT);
1027 cost_classes_t cost_classes_ptr
1028 = regno_cost_classes[REGNO (ops[i])];
1029
1030 pp->mem_cost = MIN (pp->mem_cost,
1031 (qq->mem_cost + op_cost_add) * scale);
1032
1033 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1034 pp_costs[k]
1035 = MIN (pp_costs[k], (qq_costs[k] + op_cost_add) * scale);
1036 }
1037 }
1038
1039 if (allocno_p)
1040 for (i = 0; i < n_ops; i++)
1041 {
1042 ira_allocno_t a;
1043 rtx op = ops[i];
1044
1045 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
1046 continue;
1047 a = ira_curr_regno_allocno_map [REGNO (op)];
1048 if (! ALLOCNO_BAD_SPILL_P (a) && insn_allows_mem[i] == 0)
1049 ALLOCNO_BAD_SPILL_P (a) = true;
1050 }
1051
1052 }
1053
1054 \f
1055
1056 /* Wrapper around REGNO_OK_FOR_INDEX_P, to allow pseudo registers. */
1057 static inline bool
1058 ok_for_index_p_nonstrict (rtx reg)
1059 {
1060 unsigned regno = REGNO (reg);
1061
1062 return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno);
1063 }
1064
1065 /* A version of regno_ok_for_base_p for use here, when all
1066 pseudo-registers should count as OK. Arguments as for
1067 regno_ok_for_base_p. */
1068 static inline bool
1069 ok_for_base_p_nonstrict (rtx reg, machine_mode mode, addr_space_t as,
1070 enum rtx_code outer_code, enum rtx_code index_code)
1071 {
1072 unsigned regno = REGNO (reg);
1073
1074 if (regno >= FIRST_PSEUDO_REGISTER)
1075 return true;
1076 return ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
1077 }
1078
1079 /* Record the pseudo registers we must reload into hard registers in a
1080 subexpression of a memory address, X.
1081
1082 If CONTEXT is 0, we are looking at the base part of an address,
1083 otherwise we are looking at the index part.
1084
1085 MODE and AS are the mode and address space of the memory reference;
1086 OUTER_CODE and INDEX_CODE give the context that the rtx appears in.
1087 These four arguments are passed down to base_reg_class.
1088
1089 SCALE is twice the amount to multiply the cost by (it is twice so
1090 we can represent half-cost adjustments). */
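/* Editorial note (illustrative only): callers pass SCALE already doubled
   (e.g. record_operand_costs uses frequency * 2 for a MEM address); the
   ambiguous base/index case below then recurses with scale / 2 without
   losing the integer frequency, and the REG case divides by 2 again when
   it finally adds (cost * scale) / 2 to the accumulated costs.  */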
1091 static void
1092 record_address_regs (machine_mode mode, addr_space_t as, rtx x,
1093 int context, enum rtx_code outer_code,
1094 enum rtx_code index_code, int scale)
1095 {
1096 enum rtx_code code = GET_CODE (x);
1097 enum reg_class rclass;
1098
1099 if (context == 1)
1100 rclass = INDEX_REG_CLASS;
1101 else
1102 rclass = base_reg_class (mode, as, outer_code, index_code);
1103
1104 switch (code)
1105 {
1106 case CONST_INT:
1107 case CONST:
1108 case PC:
1109 case SYMBOL_REF:
1110 case LABEL_REF:
1111 return;
1112
1113 case PLUS:
1114 /* When we have an address that is a sum, we must determine
1115 whether registers are "base" or "index" regs. If there is a
1116 sum of two registers, we must choose one to be the "base".
1117 Luckily, we can use the REG_POINTER to make a good choice
1118 most of the time. We only need to do this on machines that
1119 can have two registers in an address and where the base and
1120 index register classes are different.
1121
1122 ??? This code used to set REGNO_POINTER_FLAG in some cases,
1123 but that seems bogus since it should only be set when we are
1124 sure the register is being used as a pointer. */
1125 {
1126 rtx arg0 = XEXP (x, 0);
1127 rtx arg1 = XEXP (x, 1);
1128 enum rtx_code code0 = GET_CODE (arg0);
1129 enum rtx_code code1 = GET_CODE (arg1);
1130
1131 /* Look inside subregs. */
1132 if (code0 == SUBREG)
1133 arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0);
1134 if (code1 == SUBREG)
1135 arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1);
1136
1137 /* If index registers do not appear, or coincide with base registers,
1138 just record registers in any non-constant operands. We
1139 assume here, as well as in the tests below, that all
1140 addresses are in canonical form. */
1141 if (MAX_REGS_PER_ADDRESS == 1
1142 || INDEX_REG_CLASS == base_reg_class (VOIDmode, as, PLUS, SCRATCH))
1143 {
1144 record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1145 if (! CONSTANT_P (arg1))
1146 record_address_regs (mode, as, arg1, context, PLUS, code0, scale);
1147 }
1148
1149 /* If the second operand is a constant integer, it doesn't
1150 change what class the first operand must be. */
1151 else if (CONST_SCALAR_INT_P (arg1))
1152 record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1153 /* If the second operand is a symbolic constant, the first
1154 operand must be an index register. */
1155 else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF)
1156 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1157 /* If both operands are registers but one is already a hard
1158 register of index or reg-base class, give the other the
1159 class that the hard register is not. */
1160 else if (code0 == REG && code1 == REG
1161 && REGNO (arg0) < FIRST_PSEUDO_REGISTER
1162 && (ok_for_base_p_nonstrict (arg0, mode, as, PLUS, REG)
1163 || ok_for_index_p_nonstrict (arg0)))
1164 record_address_regs (mode, as, arg1,
1165 ok_for_base_p_nonstrict (arg0, mode, as,
1166 PLUS, REG) ? 1 : 0,
1167 PLUS, REG, scale);
1168 else if (code0 == REG && code1 == REG
1169 && REGNO (arg1) < FIRST_PSEUDO_REGISTER
1170 && (ok_for_base_p_nonstrict (arg1, mode, as, PLUS, REG)
1171 || ok_for_index_p_nonstrict (arg1)))
1172 record_address_regs (mode, as, arg0,
1173 ok_for_base_p_nonstrict (arg1, mode, as,
1174 PLUS, REG) ? 1 : 0,
1175 PLUS, REG, scale);
1176 /* If one operand is known to be a pointer, it must be the
1177 base with the other operand the index. Likewise if the
1178 other operand is a MULT. */
1179 else if ((code0 == REG && REG_POINTER (arg0)) || code1 == MULT)
1180 {
1181 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
1182 record_address_regs (mode, as, arg1, 1, PLUS, code0, scale);
1183 }
1184 else if ((code1 == REG && REG_POINTER (arg1)) || code0 == MULT)
1185 {
1186 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1187 record_address_regs (mode, as, arg1, 0, PLUS, code0, scale);
1188 }
1189 /* Otherwise, count equal chances that each might be a base or
1190 index register. This case should be rare. */
1191 else
1192 {
1193 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale / 2);
1194 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale / 2);
1195 record_address_regs (mode, as, arg1, 0, PLUS, code0, scale / 2);
1196 record_address_regs (mode, as, arg1, 1, PLUS, code0, scale / 2);
1197 }
1198 }
1199 break;
1200
1201 /* Double the importance of an allocno that is incremented or
1202 decremented, since it would take two extra insns if it ends
1203 up in the wrong place. */
1204 case POST_MODIFY:
1205 case PRE_MODIFY:
1206 record_address_regs (mode, as, XEXP (x, 0), 0, code,
1207 GET_CODE (XEXP (XEXP (x, 1), 1)), 2 * scale);
1208 if (REG_P (XEXP (XEXP (x, 1), 1)))
1209 record_address_regs (mode, as, XEXP (XEXP (x, 1), 1), 1, code, REG,
1210 2 * scale);
1211 break;
1212
1213 case POST_INC:
1214 case PRE_INC:
1215 case POST_DEC:
1216 case PRE_DEC:
1217 /* Double the importance of an allocno that is incremented or
1218 decremented, since it would take two extra insns if it ends
1219 up in the wrong place. */
1220 record_address_regs (mode, as, XEXP (x, 0), 0, code, SCRATCH, 2 * scale);
1221 break;
1222
1223 case REG:
1224 {
1225 struct costs *pp;
1226 int *pp_costs;
1227 enum reg_class i;
1228 int k, regno, add_cost;
1229 cost_classes_t cost_classes_ptr;
1230 enum reg_class *cost_classes;
1231 move_table *move_in_cost;
1232
1233 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
1234 break;
1235
1236 regno = REGNO (x);
1237 if (allocno_p)
1238 ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[regno]) = true;
1239 pp = COSTS (costs, COST_INDEX (regno));
1240 add_cost = (ira_memory_move_cost[Pmode][rclass][1] * scale) / 2;
1241 if (INT_MAX - add_cost < pp->mem_cost)
1242 pp->mem_cost = INT_MAX;
1243 else
1244 pp->mem_cost += add_cost;
1245 cost_classes_ptr = regno_cost_classes[regno];
1246 cost_classes = cost_classes_ptr->classes;
1247 pp_costs = pp->cost;
1248 ira_init_register_move_cost_if_necessary (Pmode);
1249 move_in_cost = ira_may_move_in_cost[Pmode];
1250 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1251 {
1252 i = cost_classes[k];
1253 add_cost = (move_in_cost[i][rclass] * scale) / 2;
1254 if (INT_MAX - add_cost < pp_costs[k])
1255 pp_costs[k] = INT_MAX;
1256 else
1257 pp_costs[k] += add_cost;
1258 }
1259 }
1260 break;
1261
1262 default:
1263 {
1264 const char *fmt = GET_RTX_FORMAT (code);
1265 int i;
1266 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1267 if (fmt[i] == 'e')
1268 record_address_regs (mode, as, XEXP (x, i), context, code, SCRATCH,
1269 scale);
1270 }
1271 }
1272 }
1273
1274 \f
1275
1276 /* Calculate the costs of insn operands. */
1277 static void
1278 record_operand_costs (rtx_insn *insn, enum reg_class *pref)
1279 {
1280 const char *constraints[MAX_RECOG_OPERANDS];
1281 machine_mode modes[MAX_RECOG_OPERANDS];
1282 rtx set;
1283 int i;
1284
1285 if ((set = single_set (insn)) != NULL_RTX
1286 /* In rare cases the single set insn might have fewer than 2 operands,
1287 as the source can be a fixed special reg. */
1288 && recog_data.n_operands > 1
1289 && recog_data.operand[0] == SET_DEST (set)
1290 && recog_data.operand[1] == SET_SRC (set))
1291 {
1292 int regno, other_regno;
1293 rtx dest = SET_DEST (set);
1294 rtx src = SET_SRC (set);
1295
1296 if (GET_CODE (dest) == SUBREG
1297 && known_eq (GET_MODE_SIZE (GET_MODE (dest)),
1298 GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))
1299 dest = SUBREG_REG (dest);
1300 if (GET_CODE (src) == SUBREG
1301 && known_eq (GET_MODE_SIZE (GET_MODE (src)),
1302 GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
1303 src = SUBREG_REG (src);
1304 if (REG_P (src) && REG_P (dest)
1305 && (((regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
1306 && (other_regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER)
1307 || ((regno = REGNO (dest)) >= FIRST_PSEUDO_REGISTER
1308 && (other_regno = REGNO (src)) < FIRST_PSEUDO_REGISTER)))
1309 {
1310 machine_mode mode = GET_MODE (SET_SRC (set));
1311 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1312 enum reg_class *cost_classes = cost_classes_ptr->classes;
1313 reg_class_t rclass, hard_reg_class, bigger_hard_reg_class;
1314 int cost, k;
1315 move_table *move_costs;
1316 bool dead_p = find_regno_note (insn, REG_DEAD, REGNO (src));
1317
1318 ira_init_register_move_cost_if_necessary (mode);
1319 move_costs = ira_register_move_cost[mode];
1320 hard_reg_class = REGNO_REG_CLASS (other_regno);
1321 bigger_hard_reg_class = ira_pressure_class_translate[hard_reg_class];
1322 /* Target code may return any cost for mode which does not
1323 fit the hard reg class (e.g. DImode for AREG on
1324 i386). Check this and use a bigger class to get the
1325 right cost. */
1326 if (bigger_hard_reg_class != NO_REGS
1327 && ! ira_hard_reg_in_set_p (other_regno, mode,
1328 reg_class_contents[hard_reg_class]))
1329 hard_reg_class = bigger_hard_reg_class;
1330 i = regno == (int) REGNO (src) ? 1 : 0;
1331 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1332 {
1333 rclass = cost_classes[k];
1334 cost = (i == 0
1335 ? move_costs[hard_reg_class][rclass]
1336 : move_costs[rclass][hard_reg_class]);
1337
1338 op_costs[i]->cost[k] = cost * frequency;
1339 /* If this insn is a single set copying operand 1 to
1340 operand 0 and one operand is an allocno with the
1341 other a hard reg or an allocno that prefers a hard
1342 register that is in its own register class then we
1343 may want to adjust the cost of that register class to
1344 -1.
1345
1346 Avoid the adjustment if the source does not die, to
1347 avoid stressing the register allocator by preferencing
1348 two colliding registers into a single class. */
1349 if (dead_p
1350 && TEST_HARD_REG_BIT (reg_class_contents[rclass], other_regno)
1351 && (reg_class_size[(int) rclass]
1352 == (ira_reg_class_max_nregs
1353 [(int) rclass][(int) GET_MODE(src)])))
1354 {
1355 if (reg_class_size[rclass] == 1)
1356 op_costs[i]->cost[k] = -frequency;
1357 else if (in_hard_reg_set_p (reg_class_contents[rclass],
1358 GET_MODE(src), other_regno))
1359 op_costs[i]->cost[k] = -frequency;
1360 }
1361 }
1362 op_costs[i]->mem_cost
1363 = ira_memory_move_cost[mode][hard_reg_class][i] * frequency;
1364 return;
1365 }
1366 }
1367
1368 for (i = 0; i < recog_data.n_operands; i++)
1369 {
1370 constraints[i] = recog_data.constraints[i];
1371 modes[i] = recog_data.operand_mode[i];
1372 }
1373
1374 /* If we get here, we are set up to record the costs of all the
1375 operands for this insn. Start by initializing the costs. Then
1376 handle any address registers. Finally record the desired classes
1377 for any allocnos, doing it twice if some pair of operands are
1378 commutative. */
1379 for (i = 0; i < recog_data.n_operands; i++)
1380 {
1381 rtx op_mem = extract_mem_from_operand (recog_data.operand[i]);
1382 memcpy (op_costs[i], init_cost, struct_costs_size);
1383
1384 if (GET_CODE (recog_data.operand[i]) == SUBREG)
1385 recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]);
1386
1387 if (MEM_P (op_mem))
1388 record_address_regs (GET_MODE (op_mem),
1389 MEM_ADDR_SPACE (op_mem),
1390 XEXP (op_mem, 0),
1391 0, MEM, SCRATCH, frequency * 2);
1392 else if (constraints[i][0] == 'p'
1393 || (insn_extra_address_constraint
1394 (lookup_constraint (constraints[i]))))
1395 record_address_regs (VOIDmode, ADDR_SPACE_GENERIC,
1396 recog_data.operand[i], 0, ADDRESS, SCRATCH,
1397 frequency * 2);
1398 }
1399
1400 /* Check for commutative in a separate loop so everything will have
1401 been initialized. We must do this even if one operand is a
1402 constant--see addsi3 in m68k.md. */
1403 for (i = 0; i < (int) recog_data.n_operands - 1; i++)
1404 if (constraints[i][0] == '%')
1405 {
1406 const char *xconstraints[MAX_RECOG_OPERANDS];
1407 int j;
1408
1409 /* Handle commutative operands by swapping the
1410 constraints. We assume the modes are the same. */
1411 for (j = 0; j < recog_data.n_operands; j++)
1412 xconstraints[j] = constraints[j];
1413
1414 xconstraints[i] = constraints[i+1];
1415 xconstraints[i+1] = constraints[i];
1416 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1417 recog_data.operand, modes,
1418 xconstraints, insn, pref);
1419 }
1420 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1421 recog_data.operand, modes,
1422 constraints, insn, pref);
1423 }
1424
1425 \f
1426
1427 /* Process one insn INSN. Scan it and record each time it would save
1428 code to put certain allocnos in a certain class. Return the last
1429 insn processed, so that the scan can be continued from there. */
1430 static rtx_insn *
1431 scan_one_insn (rtx_insn *insn)
1432 {
1433 enum rtx_code pat_code;
1434 rtx set, note;
1435 int i, k;
1436 bool counted_mem;
1437
1438 if (!NONDEBUG_INSN_P (insn))
1439 return insn;
1440
1441 pat_code = GET_CODE (PATTERN (insn));
1442 if (pat_code == ASM_INPUT)
1443 return insn;
1444
1445 /* If INSN is a USE/CLOBBER of a pseudo in a mode M then go ahead
1446 and initialize the register move costs of mode M.
1447
1448 The pseudo may be related to another pseudo via a copy (implicit or
1449 explicit) and if there are no mode M uses/sets of the original
1450 pseudo, then we may leave the register move costs uninitialized for
1451 mode M. */
1452 if (pat_code == USE || pat_code == CLOBBER)
1453 {
1454 rtx x = XEXP (PATTERN (insn), 0);
1455 if (GET_CODE (x) == REG
1456 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1457 && have_regs_of_mode[GET_MODE (x)])
1458 ira_init_register_move_cost_if_necessary (GET_MODE (x));
1459 return insn;
1460 }
1461
1462 counted_mem = false;
1463 set = single_set (insn);
1464 extract_insn (insn);
1465
1466 /* If this insn loads a parameter from its stack slot, then it
1467 represents a savings, rather than a cost, if the parameter is
1468 stored in memory. Record this fact.
1469
1470 Similarly if we're loading other constants from memory (constant
1471 pool, TOC references, small data areas, etc) and this is the only
1472 assignment to the destination pseudo.
1473
1474 Don't do this if SET_SRC (set) isn't a general operand: if it is
1475 a memory requiring special instructions to load it, decreasing
1476 mem_cost might result in it being loaded using the specialized
1477 instruction into a register, then stored into the stack and loaded
1478 again from the stack. See PR52208.
1479
1480 Don't do this if SET_SRC (set) has side effect. See PR56124. */
1481 if (set != 0 && REG_P (SET_DEST (set)) && MEM_P (SET_SRC (set))
1482 && (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL_RTX
1483 && ((MEM_P (XEXP (note, 0))
1484 && !side_effects_p (SET_SRC (set)))
1485 || (CONSTANT_P (XEXP (note, 0))
1486 && targetm.legitimate_constant_p (GET_MODE (SET_DEST (set)),
1487 XEXP (note, 0))
1488 && REG_N_SETS (REGNO (SET_DEST (set))) == 1))
1489 && general_operand (SET_SRC (set), GET_MODE (SET_SRC (set)))
1490 /* LRA does not use equiv with a symbol for PIC code. */
1491 && (! ira_use_lra_p || ! pic_offset_table_rtx
1492 || ! contains_symbol_ref_p (XEXP (note, 0))))
1493 {
1494 enum reg_class cl = GENERAL_REGS;
1495 rtx reg = SET_DEST (set);
1496 int num = COST_INDEX (REGNO (reg));
1497
1498 COSTS (costs, num)->mem_cost
1499 -= ira_memory_move_cost[GET_MODE (reg)][cl][1] * frequency;
1500 record_address_regs (GET_MODE (SET_SRC (set)),
1501 MEM_ADDR_SPACE (SET_SRC (set)),
1502 XEXP (SET_SRC (set), 0), 0, MEM, SCRATCH,
1503 frequency * 2);
1504 counted_mem = true;
1505 }
1506
1507 record_operand_costs (insn, pref);
1508
1509 /* Now add the cost for each operand to the total costs for its
1510 allocno. */
1511 for (i = 0; i < recog_data.n_operands; i++)
1512 {
1513 rtx op = recog_data.operand[i];
1514
1515 if (GET_CODE (op) == SUBREG)
1516 op = SUBREG_REG (op);
1517 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
1518 {
1519 int regno = REGNO (op);
1520 struct costs *p = COSTS (costs, COST_INDEX (regno));
1521 struct costs *q = op_costs[i];
1522 int *p_costs = p->cost, *q_costs = q->cost;
1523 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1524 int add_cost;
1525
1526 /* If we already accounted for the memory "cost" above, don't
1527 do so again. */
1528 if (!counted_mem)
1529 {
1530 add_cost = q->mem_cost;
1531 if (add_cost > 0 && INT_MAX - add_cost < p->mem_cost)
1532 p->mem_cost = INT_MAX;
1533 else
1534 p->mem_cost += add_cost;
1535 }
1536 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1537 {
1538 add_cost = q_costs[k];
1539 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1540 p_costs[k] = INT_MAX;
1541 else
1542 p_costs[k] += add_cost;
1543 }
1544 }
1545 }
1546 return insn;
1547 }
1548
1549 \f
1550
1551 /* Print allocno costs to file F. */
1552 static void
1553 print_allocno_costs (FILE *f)
1554 {
1555 int k;
1556 ira_allocno_t a;
1557 ira_allocno_iterator ai;
1558
1559 ira_assert (allocno_p);
1560 fprintf (f, "\n");
1561 FOR_EACH_ALLOCNO (a, ai)
1562 {
1563 int i, rclass;
1564 basic_block bb;
1565 int regno = ALLOCNO_REGNO (a);
1566 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1567 enum reg_class *cost_classes = cost_classes_ptr->classes;
1568
1569 i = ALLOCNO_NUM (a);
1570 fprintf (f, " a%d(r%d,", i, regno);
1571 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1572 fprintf (f, "b%d", bb->index);
1573 else
1574 fprintf (f, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1575 fprintf (f, ") costs:");
1576 for (k = 0; k < cost_classes_ptr->num; k++)
1577 {
1578 rclass = cost_classes[k];
1579 fprintf (f, " %s:%d", reg_class_names[rclass],
1580 COSTS (costs, i)->cost[k]);
1581 if (flag_ira_region == IRA_REGION_ALL
1582 || flag_ira_region == IRA_REGION_MIXED)
1583 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->cost[k]);
1584 }
1585 fprintf (f, " MEM:%i", COSTS (costs, i)->mem_cost);
1586 if (flag_ira_region == IRA_REGION_ALL
1587 || flag_ira_region == IRA_REGION_MIXED)
1588 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->mem_cost);
1589 fprintf (f, "\n");
1590 }
1591 }
1592
1593 /* Print pseudo costs to file F. */
1594 static void
1595 print_pseudo_costs (FILE *f)
1596 {
1597 int regno, k;
1598 int rclass;
1599 cost_classes_t cost_classes_ptr;
1600 enum reg_class *cost_classes;
1601
1602 ira_assert (! allocno_p);
1603 fprintf (f, "\n");
1604 for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
1605 {
1606 if (REG_N_REFS (regno) <= 0)
1607 continue;
1608 cost_classes_ptr = regno_cost_classes[regno];
1609 cost_classes = cost_classes_ptr->classes;
1610 fprintf (f, " r%d costs:", regno);
1611 for (k = 0; k < cost_classes_ptr->num; k++)
1612 {
1613 rclass = cost_classes[k];
1614 fprintf (f, " %s:%d", reg_class_names[rclass],
1615 COSTS (costs, regno)->cost[k]);
1616 }
1617 fprintf (f, " MEM:%i\n", COSTS (costs, regno)->mem_cost);
1618 }
1619 }
1620
1621 /* Traverse the basic block BB to update the allocno
1622 costs. */
1623 static void
1624 process_bb_for_costs (basic_block bb)
1625 {
1626 rtx_insn *insn;
1627
1628 frequency = REG_FREQ_FROM_BB (bb);
1629 if (frequency == 0)
1630 frequency = 1;
1631 FOR_BB_INSNS (bb, insn)
1632 insn = scan_one_insn (insn);
1633 }
1634
1635 /* Traverse the BB represented by LOOP_TREE_NODE to update the allocno
1636 costs. */
1637 static void
1638 process_bb_node_for_costs (ira_loop_tree_node_t loop_tree_node)
1639 {
1640 basic_block bb;
1641
1642 bb = loop_tree_node->bb;
1643 if (bb != NULL)
1644 process_bb_for_costs (bb);
1645 }
1646
1647 /* Find costs of register classes and memory for allocnos or pseudos
1648 and their best costs. Set up preferred, alternative and allocno
1649 classes for pseudos. */
1650 static void
1651 find_costs_and_classes (FILE *dump_file)
1652 {
1653 int i, k, start, max_cost_classes_num;
1654 int pass;
1655 basic_block bb;
1656 enum reg_class *regno_best_class, new_class;
1657
1658 init_recog ();
1659 regno_best_class
1660 = (enum reg_class *) ira_allocate (max_reg_num ()
1661 * sizeof (enum reg_class));
1662 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1663 regno_best_class[i] = NO_REGS;
1664 if (!resize_reg_info () && allocno_p
1665 && pseudo_classes_defined_p && flag_expensive_optimizations)
1666 {
1667 ira_allocno_t a;
1668 ira_allocno_iterator ai;
1669
1670 pref = pref_buffer;
1671 max_cost_classes_num = 1;
1672 FOR_EACH_ALLOCNO (a, ai)
1673 {
1674 pref[ALLOCNO_NUM (a)] = reg_preferred_class (ALLOCNO_REGNO (a));
1675 setup_regno_cost_classes_by_aclass
1676 (ALLOCNO_REGNO (a), pref[ALLOCNO_NUM (a)]);
1677 max_cost_classes_num
1678 = MAX (max_cost_classes_num,
1679 regno_cost_classes[ALLOCNO_REGNO (a)]->num);
1680 }
1681 start = 1;
1682 }
1683 else
1684 {
1685 pref = NULL;
1686 max_cost_classes_num = ira_important_classes_num;
1687 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1688 if (regno_reg_rtx[i] != NULL_RTX)
1689 setup_regno_cost_classes_by_mode (i, PSEUDO_REGNO_MODE (i));
1690 else
1691 setup_regno_cost_classes_by_aclass (i, ALL_REGS);
1692 start = 0;
1693 }
1694 if (allocno_p)
1695 /* Clear the flag for the next compiled function. */
1696 pseudo_classes_defined_p = false;
1697 /* Normally we scan the insns once and determine the best class to
1698 use for each allocno. However, if -fexpensive-optimizations is
1699 on, we do so twice, the second time using the tentative best
1700 classes to guide the selection. */
1701 for (pass = start; pass <= flag_expensive_optimizations; pass++)
1702 {
1703 if ((!allocno_p || internal_flag_ira_verbose > 0) && dump_file)
1704 fprintf (dump_file,
1705 "\nPass %i for finding pseudo/allocno costs\n\n", pass);
1706
1707 if (pass != start)
1708 {
1709 max_cost_classes_num = 1;
1710 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1711 {
1712 setup_regno_cost_classes_by_aclass (i, regno_best_class[i]);
1713 max_cost_classes_num
1714 = MAX (max_cost_classes_num, regno_cost_classes[i]->num);
1715 }
1716 }
1717
1718 struct_costs_size
1719 = sizeof (struct costs) + sizeof (int) * (max_cost_classes_num - 1);
1720 /* Zero out our accumulation of the cost of each class for each
1721 allocno. */
1722 memset (costs, 0, cost_elements_num * struct_costs_size);
1723
1724 if (allocno_p)
1725 {
1726 /* Scan the instructions and record each time it would save code
1727 to put a certain allocno in a certain class. */
1728 ira_traverse_loop_tree (true, ira_loop_tree_root,
1729 process_bb_node_for_costs, NULL);
1730
1731 memcpy (total_allocno_costs, costs,
1732 max_struct_costs_size * ira_allocnos_num);
1733 }
1734 else
1735 {
1736 basic_block bb;
1737
1738 FOR_EACH_BB_FN (bb, cfun)
1739 process_bb_for_costs (bb);
1740 }
1741
1742 if (pass == 0)
1743 pref = pref_buffer;
1744
1745 /* Now for each allocno look at how desirable each class is and
1746 find which class is preferred. */
1747 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1748 {
1749 ira_allocno_t a, parent_a;
1750 int rclass, a_num, parent_a_num, add_cost;
1751 ira_loop_tree_node_t parent;
1752 int best_cost, allocno_cost;
1753 enum reg_class best, alt_class;
1754 cost_classes_t cost_classes_ptr = regno_cost_classes[i];
1755 enum reg_class *cost_classes;
1756 int *i_costs = temp_costs->cost;
1757 int i_mem_cost;
1758 int equiv_savings = regno_equiv_gains[i];
1759
1760 if (! allocno_p)
1761 {
1762 if (regno_reg_rtx[i] == NULL_RTX)
1763 continue;
1764 memcpy (temp_costs, COSTS (costs, i), struct_costs_size);
1765 i_mem_cost = temp_costs->mem_cost;
1766 cost_classes = cost_classes_ptr->classes;
1767 }
1768 else
1769 {
1770 if (ira_regno_allocno_map[i] == NULL)
1771 continue;
1772 memset (temp_costs, 0, struct_costs_size);
1773 i_mem_cost = 0;
1774 cost_classes = cost_classes_ptr->classes;
1775 /* Find cost of all allocnos with the same regno. */
1776 for (a = ira_regno_allocno_map[i];
1777 a != NULL;
1778 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1779 {
1780 int *a_costs, *p_costs;
1781
1782 a_num = ALLOCNO_NUM (a);
1783 if ((flag_ira_region == IRA_REGION_ALL
1784 || flag_ira_region == IRA_REGION_MIXED)
1785 && (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
1786 && (parent_a = parent->regno_allocno_map[i]) != NULL
1787 /* There are no caps yet. */
1788 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE
1789 (a)->border_allocnos,
1790 ALLOCNO_NUM (a)))
1791 {
1792 /* Propagate costs to upper levels in the region
1793 tree. */
1794 parent_a_num = ALLOCNO_NUM (parent_a);
1795 a_costs = COSTS (total_allocno_costs, a_num)->cost;
1796 p_costs = COSTS (total_allocno_costs, parent_a_num)->cost;
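/* Accumulate the child's costs into the parent's, saturating at
   INT_MAX to avoid signed overflow.  */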
1797 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1798 {
1799 add_cost = a_costs[k];
1800 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1801 p_costs[k] = INT_MAX;
1802 else
1803 p_costs[k] += add_cost;
1804 }
1805 add_cost = COSTS (total_allocno_costs, a_num)->mem_cost;
1806 if (add_cost > 0
1807 && (INT_MAX - add_cost
1808 < COSTS (total_allocno_costs,
1809 parent_a_num)->mem_cost))
1810 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1811 = INT_MAX;
1812 else
1813 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1814 += add_cost;
1815
1816 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1817 COSTS (total_allocno_costs, parent_a_num)->mem_cost = 0;
1818 }
1819 a_costs = COSTS (costs, a_num)->cost;
1820 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1821 {
1822 add_cost = a_costs[k];
1823 if (add_cost > 0 && INT_MAX - add_cost < i_costs[k])
1824 i_costs[k] = INT_MAX;
1825 else
1826 i_costs[k] += add_cost;
1827 }
1828 add_cost = COSTS (costs, a_num)->mem_cost;
1829 if (add_cost > 0 && INT_MAX - add_cost < i_mem_cost)
1830 i_mem_cost = INT_MAX;
1831 else
1832 i_mem_cost += add_cost;
1833 }
1834 }
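/* Moveable pseudos (created by IRA's move-insertion pass) get a zero
   memory cost.  Otherwise fold in the equivalence gain: a negative
   gain makes memory correspondingly more expensive, while a positive
   gain makes memory free and penalizes every register class by the
   foregone saving.  */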
1835 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1836 i_mem_cost = 0;
1837 else if (equiv_savings < 0)
1838 i_mem_cost = -equiv_savings;
1839 else if (equiv_savings > 0)
1840 {
1841 i_mem_cost = 0;
1842 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1843 i_costs[k] += equiv_savings;
1844 }
1845
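/* Start from a large sentinel cost (about half of INT_MAX) so that
   later comparisons leave headroom against overflow.  */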
1846 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1847 best = ALL_REGS;
1848 alt_class = NO_REGS;
1849 /* Find the best common class for all allocnos with the same
1850 regno. */
1851 for (k = 0; k < cost_classes_ptr->num; k++)
1852 {
1853 rclass = cost_classes[k];
1854 if (i_costs[k] < best_cost)
1855 {
1856 best_cost = i_costs[k];
1857 best = (enum reg_class) rclass;
1858 }
1859 else if (i_costs[k] == best_cost)
1860 best = ira_reg_class_subunion[best][rclass];
1861 if (pass == flag_expensive_optimizations
1862 /* We still prefer registers to memory even at this
1863 stage if their costs are the same. We will make
1864 the final decision when assigning hard registers,
1865 once we have all the information, including more
1866 accurate costs which may change when hard registers
1867 are assigned to other pseudos, because pseudos
1868 involved in moves can be coalesced. */
1869 && i_costs[k] <= i_mem_cost
1870 && (reg_class_size[reg_class_subunion[alt_class][rclass]]
1871 > reg_class_size[alt_class]))
1872 alt_class = reg_class_subunion[alt_class][rclass];
1873 }
1874 alt_class = ira_allocno_class_translate[alt_class];
1875 if (best_cost > i_mem_cost
1876 && ! non_spilled_static_chain_regno_p (i))
1877 regno_aclass[i] = NO_REGS;
1878 else if (!optimize && !targetm.class_likely_spilled_p (best))
1879 /* Registers in the alternative class are likely to need
1880 longer or slower sequences than registers in the best class.
1881 When optimizing we make some effort to use the best class
1882 over the alternative class where possible, but at -O0 we
1883 effectively give the alternative class equal weight.
1884 We then run the risk of using slower alternative registers
1885 when plenty of registers from the best class are still free.
1886 This is especially true because live ranges tend to be very
1887 short in -O0 code and so register pressure tends to be low.
1888
1889 Avoid that by ignoring the alternative class if the best
1890 class has plenty of registers.
1891
1892 The union class arrays give important classes, and only
1893 some of them are allocno classes. So translate the chosen
1894 class into an allocno class. */
1895 regno_aclass[i] = ira_allocno_class_translate[best];
1896 else
1897 {
1898 /* Make the common class the smallest class containing
1899 both best and alt_class. Translate the common class
1900 into an allocno class too. */
1901 regno_aclass[i] = (ira_allocno_class_translate
1902 [ira_reg_class_superunion[best][alt_class]]);
1903 ira_assert (regno_aclass[i] != NO_REGS
1904 && ira_reg_allocno_class_p[regno_aclass[i]]);
1905 }
1906 if ((new_class
1907 = (reg_class) (targetm.ira_change_pseudo_allocno_class
1908 (i, regno_aclass[i], best))) != regno_aclass[i])
1909 {
1910 regno_aclass[i] = new_class;
1911 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1912 reg_class_contents[best]))
1913 best = new_class;
1914 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1915 reg_class_contents[alt_class]))
1916 alt_class = new_class;
1917 }
1918 if (pass == flag_expensive_optimizations)
1919 {
1920 if (best_cost > i_mem_cost
1921 /* Do not assign NO_REGS to static chain pointer
1922 pseudo when non-local goto is used. */
1923 && ! non_spilled_static_chain_regno_p (i))
1924 best = alt_class = NO_REGS;
1925 else if (best == alt_class)
1926 alt_class = NO_REGS;
1927 setup_reg_classes (i, best, alt_class, regno_aclass[i]);
1928 if ((!allocno_p || internal_flag_ira_verbose > 2)
1929 && dump_file != NULL)
1930 fprintf (dump_file,
1931 " r%d: preferred %s, alternative %s, allocno %s\n",
1932 i, reg_class_names[best], reg_class_names[alt_class],
1933 reg_class_names[regno_aclass[i]]);
1934 }
1935 regno_best_class[i] = best;
1936 if (! allocno_p)
1937 {
1938 pref[i] = (best_cost > i_mem_cost
1939 && ! non_spilled_static_chain_regno_p (i)
1940 ? NO_REGS : best);
1941 continue;
1942 }
1943 for (a = ira_regno_allocno_map[i];
1944 a != NULL;
1945 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1946 {
1947 enum reg_class aclass = regno_aclass[i];
1948 int a_num = ALLOCNO_NUM (a);
1949 int *total_a_costs = COSTS (total_allocno_costs, a_num)->cost;
1950 int *a_costs = COSTS (costs, a_num)->cost;
1951
1952 if (aclass == NO_REGS)
1953 best = NO_REGS;
1954 else
1955 {
1956 /* Find the best class that is a subset of the common
1957 class. */
1958 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1959 allocno_cost = best_cost;
1960 best = ALL_REGS;
1961 for (k = 0; k < cost_classes_ptr->num; k++)
1962 {
1963 rclass = cost_classes[k];
1964 if (! ira_class_subset_p[rclass][aclass])
1965 continue;
1966 if (total_a_costs[k] < best_cost)
1967 {
1968 best_cost = total_a_costs[k];
1969 allocno_cost = a_costs[k];
1970 best = (enum reg_class) rclass;
1971 }
1972 else if (total_a_costs[k] == best_cost)
1973 {
1974 best = ira_reg_class_subunion[best][rclass];
1975 allocno_cost = MAX (allocno_cost, a_costs[k]);
1976 }
1977 }
1978 ALLOCNO_CLASS_COST (a) = allocno_cost;
1979 }
1980 if (internal_flag_ira_verbose > 2 && dump_file != NULL
1981 && (pass == 0 || pref[a_num] != best))
1982 {
1983 fprintf (dump_file, " a%d (r%d,", a_num, i);
1984 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1985 fprintf (dump_file, "b%d", bb->index);
1986 else
1987 fprintf (dump_file, "l%d",
1988 ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1989 fprintf (dump_file, ") best %s, allocno %s\n",
1990 reg_class_names[best],
1991 reg_class_names[aclass]);
1992 }
1993 pref[a_num] = best;
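/* If the best class can hold at most one value of this mode (the mode
   needs all of the class's hard registers), record a preference for
   its first hard register, weighted by the cost difference measured
   in register-move units.  */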
1994 if (pass == flag_expensive_optimizations && best != aclass
1995 && ira_class_hard_regs_num[best] > 0
1996 && (ira_reg_class_max_nregs[best][ALLOCNO_MODE (a)]
1997 >= ira_class_hard_regs_num[best]))
1998 {
1999 int ind = cost_classes_ptr->index[aclass];
2000
2001 ira_assert (ind >= 0);
2002 ira_init_register_move_cost_if_necessary (ALLOCNO_MODE (a));
2003 ira_add_allocno_pref (a, ira_class_hard_regs[best][0],
2004 (a_costs[ind] - ALLOCNO_CLASS_COST (a))
2005 / (ira_register_move_cost
2006 [ALLOCNO_MODE (a)][best][aclass]));
2007 for (k = 0; k < cost_classes_ptr->num; k++)
2008 if (ira_class_subset_p[cost_classes[k]][best])
2009 a_costs[k] = a_costs[ind];
2010 }
2011 }
2012 }
2013
2014 if (internal_flag_ira_verbose > 4 && dump_file)
2015 {
2016 if (allocno_p)
2017 print_allocno_costs (dump_file);
2018 else
2019 print_pseudo_costs (dump_file);
2020 fprintf (dump_file, "\n");
2021 }
2022 }
2023 ira_free (regno_best_class);
2024 }
2025
2026 \f
2027
2028 /* Process moves involving hard regs to modify allocno hard register
2029 costs. We can do this only after determining the allocno classes.
2030 If a hard register forms a register class by itself, then moves
2031 with the hard register are already taken into account in the class
2032 costs for the allocno. */
2033 static void
2034 process_bb_node_for_hard_reg_moves (ira_loop_tree_node_t loop_tree_node)
2035 {
2036 int i, freq, src_regno, dst_regno, hard_regno, a_regno;
2037 bool to_p;
2038 ira_allocno_t a, curr_a;
2039 ira_loop_tree_node_t curr_loop_tree_node;
2040 enum reg_class rclass;
2041 basic_block bb;
2042 rtx_insn *insn;
2043 rtx set, src, dst;
2044
2045 bb = loop_tree_node->bb;
2046 if (bb == NULL)
2047 return;
2048 freq = REG_FREQ_FROM_BB (bb);
2049 if (freq == 0)
2050 freq = 1;
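/* Look for single-set moves between a pseudo and a hard register.  */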
2051 FOR_BB_INSNS (bb, insn)
2052 {
2053 if (!NONDEBUG_INSN_P (insn))
2054 continue;
2055 set = single_set (insn);
2056 if (set == NULL_RTX)
2057 continue;
2058 dst = SET_DEST (set);
2059 src = SET_SRC (set);
2060 if (! REG_P (dst) || ! REG_P (src))
2061 continue;
2062 dst_regno = REGNO (dst);
2063 src_regno = REGNO (src);
2064 if (dst_regno >= FIRST_PSEUDO_REGISTER
2065 && src_regno < FIRST_PSEUDO_REGISTER)
2066 {
2067 hard_regno = src_regno;
2068 a = ira_curr_regno_allocno_map[dst_regno];
2069 to_p = true;
2070 }
2071 else if (src_regno >= FIRST_PSEUDO_REGISTER
2072 && dst_regno < FIRST_PSEUDO_REGISTER)
2073 {
2074 hard_regno = dst_regno;
2075 a = ira_curr_regno_allocno_map[src_regno];
2076 to_p = false;
2077 }
2078 else
2079 continue;
2080 if (reg_class_size[(int) REGNO_REG_CLASS (hard_regno)]
2081 == (ira_reg_class_max_nregs
2082 [REGNO_REG_CLASS (hard_regno)][(int) ALLOCNO_MODE (a)]))
2083 /* If the class can provide only one hard reg to the allocno,
2084 we already processed the insn in record_operand_costs and
2085 updated the hard reg cost there. */
2086 continue;
2087 rclass = ALLOCNO_CLASS (a);
2088 if (! TEST_HARD_REG_BIT (reg_class_contents[rclass], hard_regno))
2089 continue;
2090 i = ira_class_hard_reg_index[rclass][hard_regno];
2091 if (i < 0)
2092 continue;
2093 a_regno = ALLOCNO_REGNO (a);
2094 for (curr_loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2095 curr_loop_tree_node != NULL;
2096 curr_loop_tree_node = curr_loop_tree_node->parent)
2097 if ((curr_a = curr_loop_tree_node->regno_allocno_map[a_regno]) != NULL)
2098 ira_add_allocno_pref (curr_a, hard_regno, freq);
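/* Also make HARD_REGNO cheaper for the allocno itself: subtract the
   frequency-weighted register move cost from its hard register and
   conflict costs.  */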
2099 {
2100 int cost;
2101 enum reg_class hard_reg_class;
2102 machine_mode mode;
2103
2104 mode = ALLOCNO_MODE (a);
2105 hard_reg_class = REGNO_REG_CLASS (hard_regno);
2106 ira_init_register_move_cost_if_necessary (mode);
2107 cost = (to_p ? ira_register_move_cost[mode][hard_reg_class][rclass]
2108 : ira_register_move_cost[mode][rclass][hard_reg_class]) * freq;
2109 ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), rclass,
2110 ALLOCNO_CLASS_COST (a));
2111 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2112 rclass, 0);
2113 ALLOCNO_HARD_REG_COSTS (a)[i] -= cost;
2114 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[i] -= cost;
2115 ALLOCNO_CLASS_COST (a) = MIN (ALLOCNO_CLASS_COST (a),
2116 ALLOCNO_HARD_REG_COSTS (a)[i]);
2117 }
2118 }
2119 }
2120
2121 /* After we find hard register and memory costs for allocnos, define
2122 their classes and modify the hard register costs to account for
2123 insns moving allocnos to/from hard registers. */
2124 static void
2125 setup_allocno_class_and_costs (void)
2126 {
2127 int i, j, n, regno, hard_regno, num;
2128 int *reg_costs;
2129 enum reg_class aclass, rclass;
2130 ira_allocno_t a;
2131 ira_allocno_iterator ai;
2132 cost_classes_t cost_classes_ptr;
2133
2134 ira_assert (allocno_p);
2135 FOR_EACH_ALLOCNO (a, ai)
2136 {
2137 i = ALLOCNO_NUM (a);
2138 regno = ALLOCNO_REGNO (a);
2139 aclass = regno_aclass[regno];
2140 cost_classes_ptr = regno_cost_classes[regno];
2141 ira_assert (pref[i] == NO_REGS || aclass != NO_REGS);
2142 ALLOCNO_MEMORY_COST (a) = COSTS (costs, i)->mem_cost;
2143 ira_set_allocno_class (a, aclass);
2144 if (aclass == NO_REGS)
2145 continue;
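/* When the preferred class differs from the allocno class, build a
   per-hard-register cost vector: registers in the preferred class
   keep the class cost, the others get the cost recorded for their
   own register class.  */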
2146 if (optimize && ALLOCNO_CLASS (a) != pref[i])
2147 {
2148 n = ira_class_hard_regs_num[aclass];
2149 ALLOCNO_HARD_REG_COSTS (a)
2150 = reg_costs = ira_allocate_cost_vector (aclass);
2151 for (j = n - 1; j >= 0; j--)
2152 {
2153 hard_regno = ira_class_hard_regs[aclass][j];
2154 if (TEST_HARD_REG_BIT (reg_class_contents[pref[i]], hard_regno))
2155 reg_costs[j] = ALLOCNO_CLASS_COST (a);
2156 else
2157 {
2158 rclass = REGNO_REG_CLASS (hard_regno);
2159 num = cost_classes_ptr->index[rclass];
2160 if (num < 0)
2161 {
2162 num = cost_classes_ptr->hard_regno_index[hard_regno];
2163 ira_assert (num >= 0);
2164 }
2165 reg_costs[j] = COSTS (costs, i)->cost[num];
2166 }
2167 }
2168 }
2169 }
2170 if (optimize)
2171 ira_traverse_loop_tree (true, ira_loop_tree_root,
2172 process_bb_node_for_hard_reg_moves, NULL);
2173 }
2174
2175 \f
2176
2177 /* Function called once per compiler run. */
2178 void
2179 ira_init_costs_once (void)
2180 {
2181 int i;
2182
2183 init_cost = NULL;
2184 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2185 {
2186 op_costs[i] = NULL;
2187 this_op_costs[i] = NULL;
2188 }
2189 temp_costs = NULL;
2190 }
2191
2192 /* Free allocated temporary cost vectors. */
2193 void
2194 target_ira_int::free_ira_costs ()
2195 {
2196 int i;
2197
2198 free (x_init_cost);
2199 x_init_cost = NULL;
2200 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2201 {
2202 free (x_op_costs[i]);
2203 free (x_this_op_costs[i]);
2204 x_op_costs[i] = x_this_op_costs[i] = NULL;
2205 }
2206 free (x_temp_costs);
2207 x_temp_costs = NULL;
2208 }
2209
2210 /* This is called each time the register-related information is
2211 changed. */
2212 void
2213 ira_init_costs (void)
2214 {
2215 int i;
2216
2217 this_target_ira_int->free_ira_costs ();
2218 max_struct_costs_size
2219 = sizeof (struct costs) + sizeof (int) * (ira_important_classes_num - 1);
2220 /* Don't use ira_allocate because vectors live through several IRA
2221 calls. */
2222 init_cost = (struct costs *) xmalloc (max_struct_costs_size);
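/* Seed the template cost vector with uniformly large dummy costs;
   real costs are computed from it during insn scanning.  */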
2223 init_cost->mem_cost = 1000000;
2224 for (i = 0; i < ira_important_classes_num; i++)
2225 init_cost->cost[i] = 1000000;
2226 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2227 {
2228 op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2229 this_op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2230 }
2231 temp_costs = (struct costs *) xmalloc (max_struct_costs_size);
2232 }
2233
2234 \f
2235
2236 /* Common initialization function for ira_costs and
2237 ira_set_pseudo_classes. */
2238 static void
2239 init_costs (void)
2240 {
2241 init_subregs_of_mode ();
2242 costs = (struct costs *) ira_allocate (max_struct_costs_size
2243 * cost_elements_num);
2244 pref_buffer = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2245 * cost_elements_num);
2246 regno_aclass = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2247 * max_reg_num ());
2248 regno_equiv_gains = (int *) ira_allocate (sizeof (int) * max_reg_num ());
2249 memset (regno_equiv_gains, 0, sizeof (int) * max_reg_num ());
2250 }
2251
2252 /* Common finalization function for ira_costs and
2253 ira_set_pseudo_classes. */
2254 static void
2255 finish_costs (void)
2256 {
2257 finish_subregs_of_mode ();
2258 ira_free (regno_equiv_gains);
2259 ira_free (regno_aclass);
2260 ira_free (pref_buffer);
2261 ira_free (costs);
2262 }
2263
2264 /* Entry function which defines register class, memory and hard
2265 register costs for each allocno. */
2266 void
2267 ira_costs (void)
2268 {
2269 allocno_p = true;
2270 cost_elements_num = ira_allocnos_num;
2271 init_costs ();
2272 total_allocno_costs = (struct costs *) ira_allocate (max_struct_costs_size
2273 * ira_allocnos_num);
2274 initiate_regno_cost_classes ();
2275 calculate_elim_costs_all_insns ();
2276 find_costs_and_classes (ira_dump_file);
2277 setup_allocno_class_and_costs ();
2278 finish_regno_cost_classes ();
2279 finish_costs ();
2280 ira_free (total_allocno_costs);
2281 }
2282
2283 /* Entry function which defines classes for pseudos.
2284 Set pseudo_classes_defined_p only if DEFINE_PSEUDO_CLASSES is true. */
2285 void
2286 ira_set_pseudo_classes (bool define_pseudo_classes, FILE *dump_file)
2287 {
2288 allocno_p = false;
2289 internal_flag_ira_verbose = flag_ira_verbose;
2290 cost_elements_num = max_reg_num ();
2291 init_costs ();
2292 initiate_regno_cost_classes ();
2293 find_costs_and_classes (dump_file);
2294 finish_regno_cost_classes ();
2295 if (define_pseudo_classes)
2296 pseudo_classes_defined_p = true;
2297
2298 finish_costs ();
2299 }
2300
2301 \f
2302
2303 /* Change hard register costs for allocnos which live through
2304 function calls. This is called only after all intersecting calls
2305 have been found while building the allocno live ranges. */
2306 void
2307 ira_tune_allocno_costs (void)
2308 {
2309 int j, n, regno;
2310 int cost, min_cost, *reg_costs;
2311 enum reg_class aclass;
2312 machine_mode mode;
2313 ira_allocno_t a;
2314 ira_allocno_iterator ai;
2315 ira_allocno_object_iterator oi;
2316 ira_object_t obj;
2317 bool skip_p;
2318
2319 FOR_EACH_ALLOCNO (a, ai)
2320 {
2321 aclass = ALLOCNO_CLASS (a);
2322 if (aclass == NO_REGS)
2323 continue;
2324 mode = ALLOCNO_MODE (a);
2325 n = ira_class_hard_regs_num[aclass];
2326 min_cost = INT_MAX;
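/* If the allocno crosses calls that are not all cheap, penalize
   call-crossing allocations: each candidate hard register gets the
   caller-save cost (when saving would be needed) plus any
   target-specific extra cost, added with saturation at INT_MAX;
   registers that already conflict with the allocno are skipped.  */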
2327 if (ALLOCNO_CALLS_CROSSED_NUM (a)
2328 != ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
2329 {
2330 ira_allocate_and_set_costs
2331 (&ALLOCNO_HARD_REG_COSTS (a), aclass,
2332 ALLOCNO_CLASS_COST (a));
2333 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2334 for (j = n - 1; j >= 0; j--)
2335 {
2336 regno = ira_class_hard_regs[aclass][j];
2337 skip_p = false;
2338 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2339 {
2340 if (ira_hard_reg_set_intersection_p (regno, mode,
2341 OBJECT_CONFLICT_HARD_REGS
2342 (obj)))
2343 {
2344 skip_p = true;
2345 break;
2346 }
2347 }
2348 if (skip_p)
2349 continue;
2350 cost = 0;
2351 if (ira_need_caller_save_p (a, regno))
2352 cost += ira_caller_save_cost (a);
2353 #ifdef IRA_HARD_REGNO_ADD_COST_MULTIPLIER
2354 {
2355 auto rclass = REGNO_REG_CLASS (regno);
2356 cost += ((ira_memory_move_cost[mode][rclass][0]
2357 + ira_memory_move_cost[mode][rclass][1])
2358 * ALLOCNO_FREQ (a)
2359 * IRA_HARD_REGNO_ADD_COST_MULTIPLIER (regno) / 2);
2360 }
2361 #endif
2362 if (INT_MAX - cost < reg_costs[j])
2363 reg_costs[j] = INT_MAX;
2364 else
2365 reg_costs[j] += cost;
2366 if (min_cost > reg_costs[j])
2367 min_cost = reg_costs[j];
2368 }
2369 }
2370 if (min_cost != INT_MAX)
2371 ALLOCNO_CLASS_COST (a) = min_cost;
2372
2373 /* Some targets allow pseudos to be allocated to unaligned sequences
2374 of hard registers. However, selecting an unaligned sequence can
2375 unnecessarily restrict later allocations. So increase the cost of
2376 unaligned hard regs to encourage the use of aligned hard regs. */
2377 {
2378 const int nregs = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
2379
2380 if (nregs > 1)
2381 {
2382 ira_allocate_and_set_costs
2383 (&ALLOCNO_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a));
2384 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2385 for (j = n - 1; j >= 0; j--)
2386 {
2387 regno = ira_non_ordered_class_hard_regs[aclass][j];
2388 if ((regno % nregs) != 0)
2389 {
2390 int index = ira_class_hard_reg_index[aclass][regno];
2391 ira_assert (index != -1);
2392 reg_costs[index] += ALLOCNO_FREQ (a);
2393 }
2394 }
2395 }
2396 }
2397 }
2398 }
2399
2400 /* Add COST to the estimated gain for eliminating REGNO with its
2401 equivalence. If COST is zero, record that no such elimination is
2402 possible. */
2403
2404 void
2405 ira_adjust_equiv_reg_cost (unsigned regno, int cost)
2406 {
2407 if (cost == 0)
2408 regno_equiv_gains[regno] = 0;
2409 else
2410 regno_equiv_gains[regno] += cost;
2411 }
2412
2413 void
2414 ira_costs_cc_finalize (void)
2415 {
2416 this_target_ira_int->free_ira_costs ();
2417 }