]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/ira-costs.c
re PR target/88207 (gcc.target/i386/pr22076.c etc. FAIL)
[thirdparty/gcc.git] / gcc / ira-costs.c
1 /* IRA hard register and memory cost calculation for allocnos or pseudos.
2 Copyright (C) 2006-2018 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "tm_p.h"
31 #include "insn-config.h"
32 #include "regs.h"
33 #include "ira.h"
34 #include "ira-int.h"
35 #include "addresses.h"
36 #include "reload.h"
37
/* The flag is set up every time when we calculate pseudo register
   classes through function ira_set_pseudo_classes.  */
static bool pseudo_classes_defined_p = false;

/* TRUE if we work with allocnos.  Otherwise we work with pseudos.  */
static bool allocno_p;

/* Number of elements in array `costs'.  */
static int cost_elements_num;

/* The `costs' struct records the cost of using hard registers of each
   class considered for the calculation and of using memory for each
   allocno or pseudo.  */
struct costs
{
  /* Cost of keeping the allocno or pseudo in memory.  */
  int mem_cost;
  /* Costs for register classes start here.  We process only some
     allocno classes.  NB: the one-element array is a pre-C99
     variable-length trailer -- instances are allocated with enough
     room for all considered cost classes (see max_struct_costs_size
     and struct_costs_size below), so do not take sizeof-based element
     counts from this declaration.  */
  int cost[1];
};
58
/* Accessors for scratch cost structures and sizes kept in the
   per-target IRA data (shared between invocations of the pass).  */
#define max_struct_costs_size \
  (this_target_ira_int->x_max_struct_costs_size)
#define init_cost \
  (this_target_ira_int->x_init_cost)
#define temp_costs \
  (this_target_ira_int->x_temp_costs)
#define op_costs \
  (this_target_ira_int->x_op_costs)
#define this_op_costs \
  (this_target_ira_int->x_this_op_costs)

/* Costs of each class for each allocno or pseudo.  */
static struct costs *costs;

/* Accumulated costs of each class for each allocno.  */
static struct costs *total_allocno_costs;

/* It is the current size of struct costs.  */
static size_t struct_costs_size;

/* Return pointer to structure containing costs of allocno or pseudo
   with given NUM in array ARR.  The structures are variable-sized, so
   plain array indexing cannot be used.  */
#define COSTS(arr, num) \
  ((struct costs *) ((char *) (arr) + (num) * struct_costs_size))

/* Return index in COSTS when processing reg with REGNO.  */
#define COST_INDEX(regno) (allocno_p					     \
			   ? ALLOCNO_NUM (ira_curr_regno_allocno_map[regno]) \
			   : (int) regno)

/* Record register class preferences of each allocno or pseudo.  Null
   value means no preferences.  It happens on the 1st iteration of the
   cost calculation.  */
static enum reg_class *pref;

/* Allocated buffer for pref.  */
static enum reg_class *pref_buffer;

/* Record allocno class of each allocno with the same regno.  */
static enum reg_class *regno_aclass;

/* Record cost gains for not allocating a register with an invariant
   equivalence.  */
static int *regno_equiv_gains;

/* Execution frequency of the current insn.  */
static int frequency;
106
107 \f
108
/* Info about reg classes whose costs are calculated for a pseudo.  */
struct cost_classes
{
  /* Number of the cost classes in the subsequent array.  */
  int num;
  /* Container of the cost classes.  Only the first NUM elements are
     meaningful.  */
  enum reg_class classes[N_REG_CLASSES];
  /* Map reg class -> index of the reg class in the previous array.
     -1 if it is not a cost class.  */
  int index[N_REG_CLASSES];
  /* Map hard regno -> index of first class in array CLASSES containing
     the hard regno, -1 otherwise.  */
  int hard_regno_index[FIRST_PSEUDO_REGISTER];
};

/* Types of pointers to the structure above.  */
typedef struct cost_classes *cost_classes_t;
typedef const struct cost_classes *const_cost_classes_t;

/* Info about cost classes for each pseudo.  */
static cost_classes_t *regno_cost_classes;

/* Helper for cost_classes hashing.  Entries are interned in
   cost_classes_htab, keyed on the (num, classes) prefix.  */
struct cost_classes_hasher : pointer_hash <cost_classes>
{
  static inline hashval_t hash (const cost_classes *);
  static inline bool equal (const cost_classes *, const cost_classes *);
  static inline void remove (cost_classes *);
};
139
140 /* Returns hash value for cost classes info HV. */
141 inline hashval_t
142 cost_classes_hasher::hash (const cost_classes *hv)
143 {
144 return iterative_hash (&hv->classes, sizeof (enum reg_class) * hv->num, 0);
145 }
146
147 /* Compares cost classes info HV1 and HV2. */
148 inline bool
149 cost_classes_hasher::equal (const cost_classes *hv1, const cost_classes *hv2)
150 {
151 return (hv1->num == hv2->num
152 && memcmp (hv1->classes, hv2->classes,
153 sizeof (enum reg_class) * hv1->num) == 0);
154 }
155
/* Delete cost classes info V from the hash table.  Invoked by the
   hash-table machinery; releases the IRA-allocated structure.  */
inline void
cost_classes_hasher::remove (cost_classes *v)
{
  ira_free (v);
}
162
/* Hash table of unique cost classes.  */
static hash_table<cost_classes_hasher> *cost_classes_htab;

/* Map allocno class -> cost classes for pseudo of given allocno
   class.  Filled lazily by setup_regno_cost_classes_by_aclass.  */
static cost_classes_t cost_classes_aclass_cache[N_REG_CLASSES];

/* Map mode -> cost classes for pseudo of given mode.  Filled lazily
   by setup_regno_cost_classes_by_mode.  */
static cost_classes_t cost_classes_mode_cache[MAX_MACHINE_MODE];

/* Cost classes that include all classes in ira_important_classes.  */
static cost_classes all_cost_classes;
175
176 /* Use the array of classes in CLASSES_PTR to fill out the rest of
177 the structure. */
178 static void
179 complete_cost_classes (cost_classes_t classes_ptr)
180 {
181 for (int i = 0; i < N_REG_CLASSES; i++)
182 classes_ptr->index[i] = -1;
183 for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
184 classes_ptr->hard_regno_index[i] = -1;
185 for (int i = 0; i < classes_ptr->num; i++)
186 {
187 enum reg_class cl = classes_ptr->classes[i];
188 classes_ptr->index[cl] = i;
189 for (int j = ira_class_hard_regs_num[cl] - 1; j >= 0; j--)
190 {
191 unsigned int hard_regno = ira_class_hard_regs[cl][j];
192 if (classes_ptr->hard_regno_index[hard_regno] < 0)
193 classes_ptr->hard_regno_index[hard_regno] = i;
194 }
195 }
196 }
197
/* Initialize info about the cost classes for each pseudo.  */
static void
initiate_regno_cost_classes (void)
{
  int size = sizeof (cost_classes_t) * max_reg_num ();

  regno_cost_classes = (cost_classes_t *) ira_allocate (size);
  memset (regno_cost_classes, 0, size);
  /* Invalidate the lazily-filled per-allocno-class and per-mode
     caches.  */
  memset (cost_classes_aclass_cache, 0,
	  sizeof (cost_classes_t) * N_REG_CLASSES);
  memset (cost_classes_mode_cache, 0,
	  sizeof (cost_classes_t) * MAX_MACHINE_MODE);
  cost_classes_htab = new hash_table<cost_classes_hasher> (200);
  /* Seed ALL_COST_CLASSES with every important class; this is the
     starting point that restrict_cost_classes later narrows.  */
  all_cost_classes.num = ira_important_classes_num;
  for (int i = 0; i < ira_important_classes_num; i++)
    all_cost_classes.classes[i] = ira_important_classes[i];
  complete_cost_classes (&all_cost_classes);
}
216
217 /* Create new cost classes from cost classes FROM and set up members
218 index and hard_regno_index. Return the new classes. The function
219 implements some common code of two functions
220 setup_regno_cost_classes_by_aclass and
221 setup_regno_cost_classes_by_mode. */
222 static cost_classes_t
223 setup_cost_classes (cost_classes_t from)
224 {
225 cost_classes_t classes_ptr;
226
227 classes_ptr = (cost_classes_t) ira_allocate (sizeof (struct cost_classes));
228 classes_ptr->num = from->num;
229 for (int i = 0; i < from->num; i++)
230 classes_ptr->classes[i] = from->classes[i];
231 complete_cost_classes (classes_ptr);
232 return classes_ptr;
233 }
234
/* Return a version of FULL that only considers registers in REGS that are
   valid for mode MODE.  Both FULL and the returned class are globally
   allocated.  */
static cost_classes_t
restrict_cost_classes (cost_classes_t full, machine_mode mode,
		       const HARD_REG_SET &regs)
{
  /* Scratch structure used only to build the candidate key before it
     is looked up in (and possibly interned into) cost_classes_htab.  */
  static struct cost_classes narrow;
  /* Map index in FULL -> index in NARROW; -1 for dropped classes.  */
  int map[N_REG_CLASSES];
  narrow.num = 0;
  for (int i = 0; i < full->num; i++)
    {
      /* Assume that we'll drop the class.  */
      map[i] = -1;

      /* Ignore classes that are too small for the mode.  */
      enum reg_class cl = full->classes[i];
      if (!contains_reg_of_mode[cl][mode])
	continue;

      /* Calculate the set of registers in CL that belong to REGS and
	 are valid for MODE.  */
      HARD_REG_SET valid_for_cl;
      COPY_HARD_REG_SET (valid_for_cl, reg_class_contents[cl]);
      AND_HARD_REG_SET (valid_for_cl, regs);
      AND_COMPL_HARD_REG_SET (valid_for_cl,
			      ira_prohibited_class_mode_regs[cl][mode]);
      AND_COMPL_HARD_REG_SET (valid_for_cl, ira_no_alloc_regs);
      if (hard_reg_set_empty_p (valid_for_cl))
	continue;

      /* Don't use this class if the set of valid registers is a subset
	 of an existing class.  For example, suppose we have two classes
	 GR_REGS and FR_REGS and a union class GR_AND_FR_REGS.  Suppose
	 that the mode changes allowed by FR_REGS are not as general as
	 the mode changes allowed by GR_REGS.

	 In this situation, the mode changes for GR_AND_FR_REGS could
	 either be seen as the union or the intersection of the mode
	 changes allowed by the two subclasses.  The justification for
	 the union-based definition would be that, if you want a mode
	 change that's only allowed by GR_REGS, you can pick a register
	 from the GR_REGS subclass.  The justification for the
	 intersection-based definition would be that every register
	 from the class would allow the mode change.

	 However, if we have a register that needs to be in GR_REGS,
	 using GR_AND_FR_REGS with the intersection-based definition
	 would be too pessimistic, since it would bring in restrictions
	 that only apply to FR_REGS.  Conversely, if we have a register
	 that needs to be in FR_REGS, using GR_AND_FR_REGS with the
	 union-based definition would lose the extra restrictions
	 placed on FR_REGS.  GR_AND_FR_REGS is therefore only useful
	 for cases where GR_REGS and FP_REGS are both valid.  */
      int pos;
      for (pos = 0; pos < narrow.num; ++pos)
	{
	  enum reg_class cl2 = narrow.classes[pos];
	  if (hard_reg_set_subset_p (valid_for_cl, reg_class_contents[cl2]))
	    break;
	}
      map[i] = pos;
      if (pos == narrow.num)
	{
	  /* If several classes are equivalent, prefer to use the one
	     that was chosen as the allocno class.  */
	  enum reg_class cl2 = ira_allocno_class_translate[cl];
	  if (ira_class_hard_regs_num[cl] == ira_class_hard_regs_num[cl2])
	    cl = cl2;
	  narrow.classes[narrow.num++] = cl;
	}
    }
  /* Nothing was dropped, so the unrestricted set can be reused.  */
  if (narrow.num == full->num)
    return full;

  cost_classes **slot = cost_classes_htab->find_slot (&narrow, INSERT);
  if (*slot == NULL)
    {
      cost_classes_t classes = setup_cost_classes (&narrow);
      /* Map equivalent classes to the representative that we chose above.  */
      for (int i = 0; i < ira_important_classes_num; i++)
	{
	  enum reg_class cl = ira_important_classes[i];
	  int index = full->index[cl];
	  if (index >= 0)
	    classes->index[cl] = map[index];
	}
      *slot = classes;
    }
  return *slot;
}
326
/* Setup cost classes for pseudo REGNO whose allocno class is ACLASS.
   This function is used when we know an initial approximation of
   allocno class of the pseudo already, e.g. on the second iteration
   of class cost calculation or after class cost calculation in
   register-pressure sensitive insn scheduling or register-pressure
   sensitive loop-invariant motion.  */
static void
setup_regno_cost_classes_by_aclass (int regno, enum reg_class aclass)
{
  /* Scratch key used only for the hash-table lookup below.  */
  static struct cost_classes classes;
  cost_classes_t classes_ptr;
  enum reg_class cl;
  int i;
  cost_classes **slot;
  HARD_REG_SET temp, temp2;
  bool exclude_p;

  if ((classes_ptr = cost_classes_aclass_cache[aclass]) == NULL)
    {
      COPY_HARD_REG_SET (temp, reg_class_contents[aclass]);
      AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
      /* We exclude classes from consideration which are subsets of
	 ACLASS only if ACLASS is a uniform class.  */
      exclude_p = ira_uniform_class_p[aclass];
      classes.num = 0;
      for (i = 0; i < ira_important_classes_num; i++)
	{
	  cl = ira_important_classes[i];
	  if (exclude_p)
	    {
	      /* Exclude non-uniform classes which are subsets of
		 ACLASS.  */
	      COPY_HARD_REG_SET (temp2, reg_class_contents[cl]);
	      AND_COMPL_HARD_REG_SET (temp2, ira_no_alloc_regs);
	      if (hard_reg_set_subset_p (temp2, temp) && cl != aclass)
		continue;
	    }
	  classes.classes[classes.num++] = cl;
	}
      slot = cost_classes_htab->find_slot (&classes, INSERT);
      if (*slot == NULL)
	{
	  classes_ptr = setup_cost_classes (&classes);
	  *slot = classes_ptr;
	}
      /* Cache the interned result for subsequent pseudos with the
	 same allocno class.  */
      classes_ptr = cost_classes_aclass_cache[aclass] = (cost_classes_t) *slot;
    }
  if (regno_reg_rtx[regno] != NULL_RTX)
    {
      /* Restrict the classes to those that are valid for REGNO's mode
	 (which might for example exclude singleton classes if the mode
	 requires two registers).  Also restrict the classes to those that
	 are valid for subregs of REGNO.  */
      const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno);
      if (!valid_regs)
	valid_regs = &reg_class_contents[ALL_REGS];
      classes_ptr = restrict_cost_classes (classes_ptr,
					   PSEUDO_REGNO_MODE (regno),
					   *valid_regs);
    }
  regno_cost_classes[regno] = classes_ptr;
}
389
390 /* Setup cost classes for pseudo REGNO with MODE. Usage of MODE can
391 decrease number of cost classes for the pseudo, if hard registers
392 of some important classes can not hold a value of MODE. So the
393 pseudo can not get hard register of some important classes and cost
394 calculation for such important classes is only wasting CPU
395 time. */
396 static void
397 setup_regno_cost_classes_by_mode (int regno, machine_mode mode)
398 {
399 if (const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno))
400 regno_cost_classes[regno] = restrict_cost_classes (&all_cost_classes,
401 mode, *valid_regs);
402 else
403 {
404 if (cost_classes_mode_cache[mode] == NULL)
405 cost_classes_mode_cache[mode]
406 = restrict_cost_classes (&all_cost_classes, mode,
407 reg_class_contents[ALL_REGS]);
408 regno_cost_classes[regno] = cost_classes_mode_cache[mode];
409 }
410 }
411
/* Finalize info about the cost classes for each pseudo: free the
   per-pseudo map and destroy the hash table of unique cost classes
   (which frees the interned entries via cost_classes_hasher::remove).  */
static void
finish_regno_cost_classes (void)
{
  ira_free (regno_cost_classes);
  delete cost_classes_htab;
  cost_classes_htab = NULL;
}
420
421 \f
422
/* Compute the cost of loading X into (if TO_P is TRUE) or from (if
   TO_P is FALSE) a register of class RCLASS in mode MODE.  X must not
   be a pseudo register.  PREV_SRI is the secondary reload info of an
   enclosing reload, or NULL at the outermost call; it is chained so
   the target hook can inspect the whole reload sequence.  */
static int
copy_cost (rtx x, machine_mode mode, reg_class_t rclass, bool to_p,
	   secondary_reload_info *prev_sri)
{
  secondary_reload_info sri;
  reg_class_t secondary_class = NO_REGS;

  /* If X is a SCRATCH, there is actually nothing to move since we are
     assuming optimal allocation.  */
  if (GET_CODE (x) == SCRATCH)
    return 0;

  /* Get the class we will actually use for a reload.  */
  rclass = targetm.preferred_reload_class (x, rclass);

  /* If we need a secondary reload for an intermediate, the cost is
     that to load the input into the intermediate register, then to
     copy it.  */
  sri.prev_sri = prev_sri;
  sri.extra_cost = 0;
  /* PR 68770: Secondary reload might examine the t_icode field.  */
  sri.t_icode = CODE_FOR_nothing;

  secondary_class = targetm.secondary_reload (to_p, x, rclass, mode, &sri);

  if (secondary_class != NO_REGS)
    {
      /* Recurse: cost of getting X into the intermediate class, plus
	 the move between the two classes.  */
      ira_init_register_move_cost_if_necessary (mode);
      return (ira_register_move_cost[mode][(int) secondary_class][(int) rclass]
	      + sri.extra_cost
	      + copy_cost (x, mode, secondary_class, to_p, &sri));
    }

  /* For memory, use the memory move cost, for (hard) registers, use
     the cost to move between the register classes, and use 2 for
     everything else (constants).  */
  if (MEM_P (x) || rclass == NO_REGS)
    return sri.extra_cost
	   + ira_memory_move_cost[mode][(int) rclass][to_p != 0];
  else if (REG_P (x))
    {
      reg_class_t x_class = REGNO_REG_CLASS (REGNO (x));

      ira_init_register_move_cost_if_necessary (mode);
      return (sri.extra_cost
	      + ira_register_move_cost[mode][(int) x_class][(int) rclass]);
    }
  else
    /* If this is a constant, we may eventually want to call rtx_cost
       here.  */
    return sri.extra_cost + COSTS_N_INSNS (1);
}
478
479 \f
480
481 /* Record the cost of using memory or hard registers of various
482 classes for the operands in INSN.
483
484 N_ALTS is the number of alternatives.
485 N_OPS is the number of operands.
486 OPS is an array of the operands.
487 MODES are the modes of the operands, in case any are VOIDmode.
488 CONSTRAINTS are the constraints to use for the operands. This array
489 is modified by this procedure.
490
491 This procedure works alternative by alternative. For each
492 alternative we assume that we will be able to allocate all allocnos
493 to their ideal register class and calculate the cost of using that
494 alternative. Then we compute, for each operand that is a
495 pseudo-register, the cost of having the allocno allocated to each
496 register class and using it in that alternative. To this cost is
497 added the cost of the alternative.
498
499 The cost of each class for this insn is its lowest cost among all
500 the alternatives. */
501 static void
502 record_reg_classes (int n_alts, int n_ops, rtx *ops,
503 machine_mode *modes, const char **constraints,
504 rtx_insn *insn, enum reg_class *pref)
505 {
506 int alt;
507 int i, j, k;
508 int insn_allows_mem[MAX_RECOG_OPERANDS];
509 move_table *move_in_cost, *move_out_cost;
510 short (*mem_cost)[2];
511
512 for (i = 0; i < n_ops; i++)
513 insn_allows_mem[i] = 0;
514
515 /* Process each alternative, each time minimizing an operand's cost
516 with the cost for each operand in that alternative. */
517 alternative_mask preferred = get_preferred_alternatives (insn);
518 for (alt = 0; alt < n_alts; alt++)
519 {
520 enum reg_class classes[MAX_RECOG_OPERANDS];
521 int allows_mem[MAX_RECOG_OPERANDS];
522 enum reg_class rclass;
523 int alt_fail = 0;
524 int alt_cost = 0, op_cost_add;
525
526 if (!TEST_BIT (preferred, alt))
527 {
528 for (i = 0; i < recog_data.n_operands; i++)
529 constraints[i] = skip_alternative (constraints[i]);
530
531 continue;
532 }
533
534 for (i = 0; i < n_ops; i++)
535 {
536 unsigned char c;
537 const char *p = constraints[i];
538 rtx op = ops[i];
539 machine_mode mode = modes[i];
540 int allows_addr = 0;
541 int win = 0;
542
543 /* Initially show we know nothing about the register class. */
544 classes[i] = NO_REGS;
545 allows_mem[i] = 0;
546
547 /* If this operand has no constraints at all, we can
548 conclude nothing about it since anything is valid. */
549 if (*p == 0)
550 {
551 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
552 memset (this_op_costs[i], 0, struct_costs_size);
553 continue;
554 }
555
556 /* If this alternative is only relevant when this operand
557 matches a previous operand, we do different things
558 depending on whether this operand is a allocno-reg or not.
559 We must process any modifiers for the operand before we
560 can make this test. */
561 while (*p == '%' || *p == '=' || *p == '+' || *p == '&')
562 p++;
563
564 if (p[0] >= '0' && p[0] <= '0' + i)
565 {
566 /* Copy class and whether memory is allowed from the
567 matching alternative. Then perform any needed cost
568 computations and/or adjustments. */
569 j = p[0] - '0';
570 classes[i] = classes[j];
571 allows_mem[i] = allows_mem[j];
572 if (allows_mem[i])
573 insn_allows_mem[i] = 1;
574
575 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
576 {
577 /* If this matches the other operand, we have no
578 added cost and we win. */
579 if (rtx_equal_p (ops[j], op))
580 win = 1;
581 /* If we can put the other operand into a register,
582 add to the cost of this alternative the cost to
583 copy this operand to the register used for the
584 other operand. */
585 else if (classes[j] != NO_REGS)
586 {
587 alt_cost += copy_cost (op, mode, classes[j], 1, NULL);
588 win = 1;
589 }
590 }
591 else if (! REG_P (ops[j])
592 || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER)
593 {
594 /* This op is an allocno but the one it matches is
595 not. */
596
597 /* If we can't put the other operand into a
598 register, this alternative can't be used. */
599
600 if (classes[j] == NO_REGS)
601 alt_fail = 1;
602 /* Otherwise, add to the cost of this alternative
603 the cost to copy the other operand to the hard
604 register used for this operand. */
605 else
606 alt_cost += copy_cost (ops[j], mode, classes[j], 1, NULL);
607 }
608 else
609 {
610 /* The costs of this operand are not the same as the
611 other operand since move costs are not symmetric.
612 Moreover, if we cannot tie them, this alternative
613 needs to do a copy, which is one insn. */
614 struct costs *pp = this_op_costs[i];
615 int *pp_costs = pp->cost;
616 cost_classes_t cost_classes_ptr
617 = regno_cost_classes[REGNO (op)];
618 enum reg_class *cost_classes = cost_classes_ptr->classes;
619 bool in_p = recog_data.operand_type[i] != OP_OUT;
620 bool out_p = recog_data.operand_type[i] != OP_IN;
621 enum reg_class op_class = classes[i];
622
623 ira_init_register_move_cost_if_necessary (mode);
624 if (! in_p)
625 {
626 ira_assert (out_p);
627 if (op_class == NO_REGS)
628 {
629 mem_cost = ira_memory_move_cost[mode];
630 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
631 {
632 rclass = cost_classes[k];
633 pp_costs[k] = mem_cost[rclass][0] * frequency;
634 }
635 }
636 else
637 {
638 move_out_cost = ira_may_move_out_cost[mode];
639 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
640 {
641 rclass = cost_classes[k];
642 pp_costs[k]
643 = move_out_cost[op_class][rclass] * frequency;
644 }
645 }
646 }
647 else if (! out_p)
648 {
649 ira_assert (in_p);
650 if (op_class == NO_REGS)
651 {
652 mem_cost = ira_memory_move_cost[mode];
653 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
654 {
655 rclass = cost_classes[k];
656 pp_costs[k] = mem_cost[rclass][1] * frequency;
657 }
658 }
659 else
660 {
661 move_in_cost = ira_may_move_in_cost[mode];
662 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
663 {
664 rclass = cost_classes[k];
665 pp_costs[k]
666 = move_in_cost[rclass][op_class] * frequency;
667 }
668 }
669 }
670 else
671 {
672 if (op_class == NO_REGS)
673 {
674 mem_cost = ira_memory_move_cost[mode];
675 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
676 {
677 rclass = cost_classes[k];
678 pp_costs[k] = ((mem_cost[rclass][0]
679 + mem_cost[rclass][1])
680 * frequency);
681 }
682 }
683 else
684 {
685 move_in_cost = ira_may_move_in_cost[mode];
686 move_out_cost = ira_may_move_out_cost[mode];
687 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
688 {
689 rclass = cost_classes[k];
690 pp_costs[k] = ((move_in_cost[rclass][op_class]
691 + move_out_cost[op_class][rclass])
692 * frequency);
693 }
694 }
695 }
696
697 /* If the alternative actually allows memory, make
698 things a bit cheaper since we won't need an extra
699 insn to load it. */
700 pp->mem_cost
701 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
702 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
703 - allows_mem[i]) * frequency;
704
705 /* If we have assigned a class to this allocno in
706 our first pass, add a cost to this alternative
707 corresponding to what we would add if this
708 allocno were not in the appropriate class. */
709 if (pref)
710 {
711 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
712
713 if (pref_class == NO_REGS)
714 alt_cost
715 += ((out_p
716 ? ira_memory_move_cost[mode][op_class][0] : 0)
717 + (in_p
718 ? ira_memory_move_cost[mode][op_class][1]
719 : 0));
720 else if (ira_reg_class_intersect
721 [pref_class][op_class] == NO_REGS)
722 alt_cost
723 += ira_register_move_cost[mode][pref_class][op_class];
724 }
725 if (REGNO (ops[i]) != REGNO (ops[j])
726 && ! find_reg_note (insn, REG_DEAD, op))
727 alt_cost += 2;
728
729 p++;
730 }
731 }
732
733 /* Scan all the constraint letters. See if the operand
734 matches any of the constraints. Collect the valid
735 register classes and see if this operand accepts
736 memory. */
737 while ((c = *p))
738 {
739 switch (c)
740 {
741 case '*':
742 /* Ignore the next letter for this pass. */
743 c = *++p;
744 break;
745
746 case '^':
747 alt_cost += 2;
748 break;
749
750 case '?':
751 alt_cost += 2;
752 break;
753
754 case 'g':
755 if (MEM_P (op)
756 || (CONSTANT_P (op)
757 && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))))
758 win = 1;
759 insn_allows_mem[i] = allows_mem[i] = 1;
760 classes[i] = ira_reg_class_subunion[classes[i]][GENERAL_REGS];
761 break;
762
763 default:
764 enum constraint_num cn = lookup_constraint (p);
765 enum reg_class cl;
766 switch (get_constraint_type (cn))
767 {
768 case CT_REGISTER:
769 cl = reg_class_for_constraint (cn);
770 if (cl != NO_REGS)
771 classes[i] = ira_reg_class_subunion[classes[i]][cl];
772 break;
773
774 case CT_CONST_INT:
775 if (CONST_INT_P (op)
776 && insn_const_int_ok_for_constraint (INTVAL (op), cn))
777 win = 1;
778 break;
779
780 case CT_MEMORY:
781 /* Every MEM can be reloaded to fit. */
782 insn_allows_mem[i] = allows_mem[i] = 1;
783 if (MEM_P (op))
784 win = 1;
785 break;
786
787 case CT_SPECIAL_MEMORY:
788 insn_allows_mem[i] = allows_mem[i] = 1;
789 if (MEM_P (op) && constraint_satisfied_p (op, cn))
790 win = 1;
791 break;
792
793 case CT_ADDRESS:
794 /* Every address can be reloaded to fit. */
795 allows_addr = 1;
796 if (address_operand (op, GET_MODE (op))
797 || constraint_satisfied_p (op, cn))
798 win = 1;
799 /* We know this operand is an address, so we
800 want it to be allocated to a hard register
801 that can be the base of an address,
802 i.e. BASE_REG_CLASS. */
803 classes[i]
804 = ira_reg_class_subunion[classes[i]]
805 [base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
806 ADDRESS, SCRATCH)];
807 break;
808
809 case CT_FIXED_FORM:
810 if (constraint_satisfied_p (op, cn))
811 win = 1;
812 break;
813 }
814 break;
815 }
816 p += CONSTRAINT_LEN (c, p);
817 if (c == ',')
818 break;
819 }
820
821 constraints[i] = p;
822
823 if (alt_fail)
824 break;
825
826 /* How we account for this operand now depends on whether it
827 is a pseudo register or not. If it is, we first check if
828 any register classes are valid. If not, we ignore this
829 alternative, since we want to assume that all allocnos get
830 allocated for register preferencing. If some register
831 class is valid, compute the costs of moving the allocno
832 into that class. */
833 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
834 {
835 if (classes[i] == NO_REGS && ! allows_mem[i])
836 {
837 /* We must always fail if the operand is a REG, but
838 we did not find a suitable class and memory is
839 not allowed.
840
841 Otherwise we may perform an uninitialized read
842 from this_op_costs after the `continue' statement
843 below. */
844 alt_fail = 1;
845 }
846 else
847 {
848 unsigned int regno = REGNO (op);
849 struct costs *pp = this_op_costs[i];
850 int *pp_costs = pp->cost;
851 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
852 enum reg_class *cost_classes = cost_classes_ptr->classes;
853 bool in_p = recog_data.operand_type[i] != OP_OUT;
854 bool out_p = recog_data.operand_type[i] != OP_IN;
855 enum reg_class op_class = classes[i];
856
857 ira_init_register_move_cost_if_necessary (mode);
858 if (! in_p)
859 {
860 ira_assert (out_p);
861 if (op_class == NO_REGS)
862 {
863 mem_cost = ira_memory_move_cost[mode];
864 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
865 {
866 rclass = cost_classes[k];
867 pp_costs[k] = mem_cost[rclass][0] * frequency;
868 }
869 }
870 else
871 {
872 move_out_cost = ira_may_move_out_cost[mode];
873 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
874 {
875 rclass = cost_classes[k];
876 pp_costs[k]
877 = move_out_cost[op_class][rclass] * frequency;
878 }
879 }
880 }
881 else if (! out_p)
882 {
883 ira_assert (in_p);
884 if (op_class == NO_REGS)
885 {
886 mem_cost = ira_memory_move_cost[mode];
887 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
888 {
889 rclass = cost_classes[k];
890 pp_costs[k] = mem_cost[rclass][1] * frequency;
891 }
892 }
893 else
894 {
895 move_in_cost = ira_may_move_in_cost[mode];
896 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
897 {
898 rclass = cost_classes[k];
899 pp_costs[k]
900 = move_in_cost[rclass][op_class] * frequency;
901 }
902 }
903 }
904 else
905 {
906 if (op_class == NO_REGS)
907 {
908 mem_cost = ira_memory_move_cost[mode];
909 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
910 {
911 rclass = cost_classes[k];
912 pp_costs[k] = ((mem_cost[rclass][0]
913 + mem_cost[rclass][1])
914 * frequency);
915 }
916 }
917 else
918 {
919 move_in_cost = ira_may_move_in_cost[mode];
920 move_out_cost = ira_may_move_out_cost[mode];
921 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
922 {
923 rclass = cost_classes[k];
924 pp_costs[k] = ((move_in_cost[rclass][op_class]
925 + move_out_cost[op_class][rclass])
926 * frequency);
927 }
928 }
929 }
930
931 if (op_class == NO_REGS)
932 /* Although we don't need insn to reload from
933 memory, still accessing memory is usually more
934 expensive than a register. */
935 pp->mem_cost = frequency;
936 else
937 /* If the alternative actually allows memory, make
938 things a bit cheaper since we won't need an
939 extra insn to load it. */
940 pp->mem_cost
941 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
942 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
943 - allows_mem[i]) * frequency;
944 /* If we have assigned a class to this allocno in
945 our first pass, add a cost to this alternative
946 corresponding to what we would add if this
947 allocno were not in the appropriate class. */
948 if (pref)
949 {
950 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
951
952 if (pref_class == NO_REGS)
953 {
954 if (op_class != NO_REGS)
955 alt_cost
956 += ((out_p
957 ? ira_memory_move_cost[mode][op_class][0]
958 : 0)
959 + (in_p
960 ? ira_memory_move_cost[mode][op_class][1]
961 : 0));
962 }
963 else if (op_class == NO_REGS)
964 alt_cost
965 += ((out_p
966 ? ira_memory_move_cost[mode][pref_class][1]
967 : 0)
968 + (in_p
969 ? ira_memory_move_cost[mode][pref_class][0]
970 : 0));
971 else if (ira_reg_class_intersect[pref_class][op_class]
972 == NO_REGS)
973 alt_cost += (ira_register_move_cost
974 [mode][pref_class][op_class]);
975 }
976 }
977 }
978
979 /* Otherwise, if this alternative wins, either because we
980 have already determined that or if we have a hard
981 register of the proper class, there is no cost for this
982 alternative. */
983 else if (win || (REG_P (op)
984 && reg_fits_class_p (op, classes[i],
985 0, GET_MODE (op))))
986 ;
987
988 /* If registers are valid, the cost of this alternative
989 includes copying the object to and/or from a
990 register. */
991 else if (classes[i] != NO_REGS)
992 {
993 if (recog_data.operand_type[i] != OP_OUT)
994 alt_cost += copy_cost (op, mode, classes[i], 1, NULL);
995
996 if (recog_data.operand_type[i] != OP_IN)
997 alt_cost += copy_cost (op, mode, classes[i], 0, NULL);
998 }
999 /* The only other way this alternative can be used is if
1000 this is a constant that could be placed into memory. */
1001 else if (CONSTANT_P (op) && (allows_addr || allows_mem[i]))
1002 alt_cost += ira_memory_move_cost[mode][classes[i]][1];
1003 else
1004 alt_fail = 1;
1005
1006 if (alt_fail)
1007 break;
1008 }
1009
1010 if (alt_fail)
1011 {
1012 /* The loop above might have exited early once the failure
1013 was seen. Skip over the constraints for the remaining
1014 operands. */
1015 i += 1;
1016 for (; i < n_ops; ++i)
1017 constraints[i] = skip_alternative (constraints[i]);
1018 continue;
1019 }
1020
1021 op_cost_add = alt_cost * frequency;
1022 /* Finally, update the costs with the information we've
1023 calculated about this alternative. */
1024 for (i = 0; i < n_ops; i++)
1025 if (REG_P (ops[i]) && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
1026 {
1027 struct costs *pp = op_costs[i], *qq = this_op_costs[i];
1028 int *pp_costs = pp->cost, *qq_costs = qq->cost;
1029 int scale = 1 + (recog_data.operand_type[i] == OP_INOUT);
1030 cost_classes_t cost_classes_ptr
1031 = regno_cost_classes[REGNO (ops[i])];
1032
1033 pp->mem_cost = MIN (pp->mem_cost,
1034 (qq->mem_cost + op_cost_add) * scale);
1035
1036 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1037 pp_costs[k]
1038 = MIN (pp_costs[k], (qq_costs[k] + op_cost_add) * scale);
1039 }
1040 }
1041
1042 if (allocno_p)
1043 for (i = 0; i < n_ops; i++)
1044 {
1045 ira_allocno_t a;
1046 rtx op = ops[i];
1047
1048 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
1049 continue;
1050 a = ira_curr_regno_allocno_map [REGNO (op)];
1051 if (! ALLOCNO_BAD_SPILL_P (a) && insn_allows_mem[i] == 0)
1052 ALLOCNO_BAD_SPILL_P (a) = true;
1053 }
1054
1055 }
1056
1057 \f
1058
1059 /* Wrapper around REGNO_OK_FOR_INDEX_P, to allow pseudo registers. */
1060 static inline bool
1061 ok_for_index_p_nonstrict (rtx reg)
1062 {
1063 unsigned regno = REGNO (reg);
1064
1065 return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno);
1066 }
1067
1068 /* A version of regno_ok_for_base_p for use here, when all
1069 pseudo-registers should count as OK. Arguments as for
1070 regno_ok_for_base_p. */
1071 static inline bool
1072 ok_for_base_p_nonstrict (rtx reg, machine_mode mode, addr_space_t as,
1073 enum rtx_code outer_code, enum rtx_code index_code)
1074 {
1075 unsigned regno = REGNO (reg);
1076
1077 if (regno >= FIRST_PSEUDO_REGISTER)
1078 return true;
1079 return ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
1080 }
1081
/* Record the pseudo registers we must reload into hard registers in a
   subexpression of a memory address, X.

   If CONTEXT is 0, we are looking at the base part of an address,
   otherwise we are looking at the index part.

   MODE and AS are the mode and address space of the memory reference;
   OUTER_CODE and INDEX_CODE give the context that the rtx appears in.
   These four arguments are passed down to base_reg_class.

   SCALE is twice the amount to multiply the cost by (it is twice so
   we can represent half-cost adjustments).  */
static void
record_address_regs (machine_mode mode, addr_space_t as, rtx x,
		     int context, enum rtx_code outer_code,
		     enum rtx_code index_code, int scale)
{
  enum rtx_code code = GET_CODE (x);
  enum reg_class rclass;

  /* The class a register must be in to be valid at this position:
     the index class inside the index part, otherwise whatever
     base_reg_class deduces from the surrounding context.  */
  if (context == 1)
    rclass = INDEX_REG_CLASS;
  else
    rclass = base_reg_class (mode, as, outer_code, index_code);

  switch (code)
    {
    case CONST_INT:
    case CONST:
    case CC0:
    case PC:
    case SYMBOL_REF:
    case LABEL_REF:
      /* Constants need no registers; nothing to record.  */
      return;

    case PLUS:
      /* When we have an address that is a sum, we must determine
	 whether registers are "base" or "index" regs.  If there is a
	 sum of two registers, we must choose one to be the "base".
	 Luckily, we can use the REG_POINTER to make a good choice
	 most of the time.  We only need to do this on machines that
	 can have two registers in an address and where the base and
	 index register classes are different.

	 ??? This code used to set REGNO_POINTER_FLAG in some cases,
	 but that seems bogus since it should only be set when we are
	 sure the register is being used as a pointer.  */
      {
	rtx arg0 = XEXP (x, 0);
	rtx arg1 = XEXP (x, 1);
	enum rtx_code code0 = GET_CODE (arg0);
	enum rtx_code code1 = GET_CODE (arg1);

	/* Look inside subregs.  */
	if (code0 == SUBREG)
	  arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0);
	if (code1 == SUBREG)
	  arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1);

	/* If index registers do not appear, or coincide with base registers,
	   just record registers in any non-constant operands.  We
	   assume here, as well as in the tests below, that all
	   addresses are in canonical form.  */
	if (MAX_REGS_PER_ADDRESS == 1
	    || INDEX_REG_CLASS == base_reg_class (VOIDmode, as, PLUS, SCRATCH))
	  {
	    record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
	    if (! CONSTANT_P (arg1))
	      record_address_regs (mode, as, arg1, context, PLUS, code0, scale);
	  }

	/* If the second operand is a constant integer, it doesn't
	   change what class the first operand must be.  */
	else if (CONST_SCALAR_INT_P (arg1))
	  record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
	/* If the second operand is a symbolic constant, the first
	   operand must be an index register.  */
	else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF)
	  record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
	/* If both operands are registers but one is already a hard
	   register of index or reg-base class, give the other the
	   class that the hard register is not.  */
	else if (code0 == REG && code1 == REG
		 && REGNO (arg0) < FIRST_PSEUDO_REGISTER
		 && (ok_for_base_p_nonstrict (arg0, mode, as, PLUS, REG)
		     || ok_for_index_p_nonstrict (arg0)))
	  record_address_regs (mode, as, arg1,
			       ok_for_base_p_nonstrict (arg0, mode, as,
							PLUS, REG) ? 1 : 0,
			       PLUS, REG, scale);
	else if (code0 == REG && code1 == REG
		 && REGNO (arg1) < FIRST_PSEUDO_REGISTER
		 && (ok_for_base_p_nonstrict (arg1, mode, as, PLUS, REG)
		     || ok_for_index_p_nonstrict (arg1)))
	  record_address_regs (mode, as, arg0,
			       ok_for_base_p_nonstrict (arg1, mode, as,
							PLUS, REG) ? 1 : 0,
			       PLUS, REG, scale);
	/* If one operand is known to be a pointer, it must be the
	   base with the other operand the index.  Likewise if the
	   other operand is a MULT.  */
	else if ((code0 == REG && REG_POINTER (arg0)) || code1 == MULT)
	  {
	    record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
	    record_address_regs (mode, as, arg1, 1, PLUS, code0, scale);
	  }
	else if ((code1 == REG && REG_POINTER (arg1)) || code0 == MULT)
	  {
	    record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
	    record_address_regs (mode, as, arg1, 0, PLUS, code0, scale);
	  }
	/* Otherwise, count equal chances that each might be a base or
	   index register.  This case should be rare.  Each operand is
	   recorded in both roles at half the scale.  */
	else
	  {
	    record_address_regs (mode, as, arg0, 0, PLUS, code1, scale / 2);
	    record_address_regs (mode, as, arg0, 1, PLUS, code1, scale / 2);
	    record_address_regs (mode, as, arg1, 0, PLUS, code0, scale / 2);
	    record_address_regs (mode, as, arg1, 1, PLUS, code0, scale / 2);
	  }
      }
      break;

      /* Double the importance of an allocno that is incremented or
	 decremented, since it would take two extra insns if it ends
	 up in the wrong place.  */
    case POST_MODIFY:
    case PRE_MODIFY:
      record_address_regs (mode, as, XEXP (x, 0), 0, code,
			   GET_CODE (XEXP (XEXP (x, 1), 1)), 2 * scale);
      if (REG_P (XEXP (XEXP (x, 1), 1)))
	record_address_regs (mode, as, XEXP (XEXP (x, 1), 1), 1, code, REG,
			     2 * scale);
      break;

    case POST_INC:
    case PRE_INC:
    case POST_DEC:
    case PRE_DEC:
      /* Double the importance of an allocno that is incremented or
	 decremented, since it would take two extra insns if it ends
	 up in the wrong place.  */
      record_address_regs (mode, as, XEXP (x, 0), 0, code, SCRATCH, 2 * scale);
      break;

    case REG:
      {
	struct costs *pp;
	int *pp_costs;
	enum reg_class i;
	int k, regno, add_cost;
	cost_classes_t cost_classes_ptr;
	enum reg_class *cost_classes;
	move_table *move_in_cost;

	/* Hard registers need no reload; only pseudos are costed.  */
	if (REGNO (x) < FIRST_PSEUDO_REGISTER)
	  break;

	regno = REGNO (x);
	if (allocno_p)
	  ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[regno]) = true;
	pp = COSTS (costs, COST_INDEX (regno));
	/* Charge the cost of loading this pseudo from memory for use
	   in the address.  SCALE is twice the real factor, hence the
	   division by 2.  Saturate at INT_MAX to avoid overflow.  */
	add_cost = (ira_memory_move_cost[Pmode][rclass][1] * scale) / 2;
	if (INT_MAX - add_cost < pp->mem_cost)
	  pp->mem_cost = INT_MAX;
	else
	  pp->mem_cost += add_cost;
	cost_classes_ptr = regno_cost_classes[regno];
	cost_classes = cost_classes_ptr->classes;
	pp_costs = pp->cost;
	ira_init_register_move_cost_if_necessary (Pmode);
	move_in_cost = ira_may_move_in_cost[Pmode];
	/* For each candidate class, charge the cost of moving the
	   pseudo from that class into the required class RCLASS,
	   again saturating at INT_MAX.  */
	for (k = cost_classes_ptr->num - 1; k >= 0; k--)
	  {
	    i = cost_classes[k];
	    add_cost = (move_in_cost[i][rclass] * scale) / 2;
	    if (INT_MAX - add_cost < pp_costs[k])
	      pp_costs[k] = INT_MAX;
	    else
	      pp_costs[k] += add_cost;
	  }
      }
      break;

    default:
      /* Recurse into any sub-expressions of an unhandled rtx.  */
      {
	const char *fmt = GET_RTX_FORMAT (code);
	int i;
	for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	  if (fmt[i] == 'e')
	    record_address_regs (mode, as, XEXP (x, i), context, code, SCRATCH,
				 scale);
      }
    }
}
1277
1278 \f
1279
1280 /* Calculate the costs of insn operands. */
1281 static void
1282 record_operand_costs (rtx_insn *insn, enum reg_class *pref)
1283 {
1284 const char *constraints[MAX_RECOG_OPERANDS];
1285 machine_mode modes[MAX_RECOG_OPERANDS];
1286 rtx set;
1287 int i;
1288
1289 if ((set = single_set (insn)) != NULL_RTX
1290 /* In rare cases the single set insn might have less 2 operands
1291 as the source can be a fixed special reg. */
1292 && recog_data.n_operands > 1
1293 && recog_data.operand[0] == SET_DEST (set)
1294 && recog_data.operand[1] == SET_SRC (set))
1295 {
1296 int regno, other_regno;
1297 rtx dest = SET_DEST (set);
1298 rtx src = SET_SRC (set);
1299
1300 if (GET_CODE (dest) == SUBREG
1301 && known_eq (GET_MODE_SIZE (GET_MODE (dest)),
1302 GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))
1303 dest = SUBREG_REG (dest);
1304 if (GET_CODE (src) == SUBREG
1305 && known_eq (GET_MODE_SIZE (GET_MODE (src)),
1306 GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
1307 src = SUBREG_REG (src);
1308 if (REG_P (src) && REG_P (dest)
1309 && (((regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
1310 && (other_regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER)
1311 || ((regno = REGNO (dest)) >= FIRST_PSEUDO_REGISTER
1312 && (other_regno = REGNO (src)) < FIRST_PSEUDO_REGISTER)))
1313 {
1314 machine_mode mode = GET_MODE (SET_SRC (set));
1315 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1316 enum reg_class *cost_classes = cost_classes_ptr->classes;
1317 reg_class_t rclass, hard_reg_class, pref_class;
1318 int cost, k;
1319 bool dead_p = find_regno_note (insn, REG_DEAD, REGNO (src));
1320
1321 ira_init_register_move_cost_if_necessary (mode);
1322 hard_reg_class = REGNO_REG_CLASS (other_regno);
1323 /* Target code may return any cost for mode which does not
1324 fit the the hard reg class (e.g. DImode for AREG on
1325 i386). Check this and use a bigger class to get the
1326 right cost. */
1327 if (! ira_hard_reg_in_set_p (other_regno, mode,
1328 reg_class_contents[hard_reg_class]))
1329 hard_reg_class = ira_pressure_class_translate[hard_reg_class];
1330 i = regno == (int) REGNO (src) ? 1 : 0;
1331 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1332 {
1333 rclass = cost_classes[k];
1334 cost = ((i == 0
1335 ? ira_register_move_cost[mode][hard_reg_class][rclass]
1336 : ira_register_move_cost[mode][rclass][hard_reg_class])
1337 * frequency);
1338 op_costs[i]->cost[k] = cost;
1339 /* If we have assigned a class to this allocno in our
1340 first pass, add a cost to this alternative
1341 corresponding to what we would add if this allocno
1342 were not in the appropriate class. */
1343 if (pref)
1344 {
1345 if ((pref_class = pref[COST_INDEX (regno)]) == NO_REGS)
1346 op_costs[i]->cost[k]
1347 += ((i == 0 ? ira_memory_move_cost[mode][rclass][0] : 0)
1348 + (i == 1 ? ira_memory_move_cost[mode][rclass][1] : 0)
1349 * frequency);
1350 else if (ira_reg_class_intersect[pref_class][rclass]
1351 == NO_REGS)
1352 op_costs[i]->cost[k]
1353 += (ira_register_move_cost[mode][pref_class][rclass]
1354 * frequency);
1355 }
1356 /* If this insn is a single set copying operand 1 to
1357 operand 0 and one operand is an allocno with the
1358 other a hard reg or an allocno that prefers a hard
1359 register that is in its own register class then we
1360 may want to adjust the cost of that register class to
1361 -1.
1362
1363 Avoid the adjustment if the source does not die to
1364 avoid stressing of register allocator by preferencing
1365 two colliding registers into single class. */
1366 if (dead_p
1367 && TEST_HARD_REG_BIT (reg_class_contents[rclass], other_regno)
1368 && (reg_class_size[(int) rclass]
1369 == (ira_reg_class_max_nregs
1370 [(int) rclass][(int) GET_MODE(src)])))
1371 {
1372 if (reg_class_size[rclass] == 1)
1373 op_costs[i]->cost[k] = -frequency;
1374 else if (in_hard_reg_set_p (reg_class_contents[rclass],
1375 GET_MODE(src), other_regno))
1376 op_costs[i]->cost[k] = -frequency;
1377 }
1378 }
1379 op_costs[i]->mem_cost
1380 = ira_memory_move_cost[mode][hard_reg_class][i] * frequency;
1381 if (pref && (pref_class = pref[COST_INDEX (regno)]) != NO_REGS)
1382 op_costs[i]->mem_cost
1383 += ira_memory_move_cost[mode][pref_class][i] * frequency;
1384 return;
1385 }
1386 }
1387
1388 for (i = 0; i < recog_data.n_operands; i++)
1389 {
1390 constraints[i] = recog_data.constraints[i];
1391 modes[i] = recog_data.operand_mode[i];
1392 }
1393
1394 /* If we get here, we are set up to record the costs of all the
1395 operands for this insn. Start by initializing the costs. Then
1396 handle any address registers. Finally record the desired classes
1397 for any allocnos, doing it twice if some pair of operands are
1398 commutative. */
1399 for (i = 0; i < recog_data.n_operands; i++)
1400 {
1401 memcpy (op_costs[i], init_cost, struct_costs_size);
1402
1403 if (GET_CODE (recog_data.operand[i]) == SUBREG)
1404 recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]);
1405
1406 if (MEM_P (recog_data.operand[i]))
1407 record_address_regs (GET_MODE (recog_data.operand[i]),
1408 MEM_ADDR_SPACE (recog_data.operand[i]),
1409 XEXP (recog_data.operand[i], 0),
1410 0, MEM, SCRATCH, frequency * 2);
1411 else if (constraints[i][0] == 'p'
1412 || (insn_extra_address_constraint
1413 (lookup_constraint (constraints[i]))))
1414 record_address_regs (VOIDmode, ADDR_SPACE_GENERIC,
1415 recog_data.operand[i], 0, ADDRESS, SCRATCH,
1416 frequency * 2);
1417 }
1418
1419 /* Check for commutative in a separate loop so everything will have
1420 been initialized. We must do this even if one operand is a
1421 constant--see addsi3 in m68k.md. */
1422 for (i = 0; i < (int) recog_data.n_operands - 1; i++)
1423 if (constraints[i][0] == '%')
1424 {
1425 const char *xconstraints[MAX_RECOG_OPERANDS];
1426 int j;
1427
1428 /* Handle commutative operands by swapping the
1429 constraints. We assume the modes are the same. */
1430 for (j = 0; j < recog_data.n_operands; j++)
1431 xconstraints[j] = constraints[j];
1432
1433 xconstraints[i] = constraints[i+1];
1434 xconstraints[i+1] = constraints[i];
1435 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1436 recog_data.operand, modes,
1437 xconstraints, insn, pref);
1438 }
1439 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1440 recog_data.operand, modes,
1441 constraints, insn, pref);
1442 }
1443
1444 \f
1445
/* Process one insn INSN.  Scan it and record each time it would save
   code to put a certain allocnos in a certain class.  Return the last
   insn processed, so that the scan can be continued from there.  */
static rtx_insn *
scan_one_insn (rtx_insn *insn)
{
  enum rtx_code pat_code;
  rtx set, note;
  int i, k;
  bool counted_mem;

  if (!NONDEBUG_INSN_P (insn))
    return insn;

  pat_code = GET_CODE (PATTERN (insn));
  if (pat_code == ASM_INPUT)
    return insn;

  /* If INSN is a USE/CLOBBER of a pseudo in a mode M then go ahead
     and initialize the register move costs of mode M.

     The pseudo may be related to another pseudo via a copy (implicit or
     explicit) and if there are no mode M uses/sets of the original
     pseudo, then we may leave the register move costs uninitialized for
     mode M.  */
  if (pat_code == USE || pat_code == CLOBBER)
    {
      rtx x = XEXP (PATTERN (insn), 0);
      if (GET_CODE (x) == REG
	  && REGNO (x) >= FIRST_PSEUDO_REGISTER
	  && have_regs_of_mode[GET_MODE (x)])
	ira_init_register_move_cost_if_necessary (GET_MODE (x));
      return insn;
    }

  /* A CLOBBER_HIGH is only expected on a hard register and
     contributes nothing to pseudo costs.  */
  if (pat_code == CLOBBER_HIGH)
    {
      gcc_assert (REG_P (XEXP (PATTERN (insn), 0))
		  && HARD_REGISTER_P (XEXP (PATTERN (insn), 0)));
      return insn;
    }

  counted_mem = false;
  set = single_set (insn);
  extract_insn (insn);

  /* If this insn loads a parameter from its stack slot, then it
     represents a savings, rather than a cost, if the parameter is
     stored in memory.  Record this fact.

     Similarly if we're loading other constants from memory (constant
     pool, TOC references, small data areas, etc) and this is the only
     assignment to the destination pseudo.

     Don't do this if SET_SRC (set) isn't a general operand, if it is
     a memory requiring special instructions to load it, decreasing
     mem_cost might result in it being loaded using the specialized
     instruction into a register, then stored into stack and loaded
     again from the stack.  See PR52208.

     Don't do this if SET_SRC (set) has side effect.  See PR56124.  */
  if (set != 0 && REG_P (SET_DEST (set)) && MEM_P (SET_SRC (set))
      && (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL_RTX
      && ((MEM_P (XEXP (note, 0))
	   && !side_effects_p (SET_SRC (set)))
	  || (CONSTANT_P (XEXP (note, 0))
	      && targetm.legitimate_constant_p (GET_MODE (SET_DEST (set)),
						XEXP (note, 0))
	      && REG_N_SETS (REGNO (SET_DEST (set))) == 1))
      && general_operand (SET_SRC (set), GET_MODE (SET_SRC (set)))
      /* LRA does not use equiv with a symbol for PIC code.  */
      && (! ira_use_lra_p || ! pic_offset_table_rtx
	  || ! contains_symbol_ref_p (XEXP (note, 0))))
    {
      enum reg_class cl = GENERAL_REGS;
      rtx reg = SET_DEST (set);
      int num = COST_INDEX (REGNO (reg));

      /* Credit the memory alternative: if the pseudo stays in memory
	 this load comes cheap, so reduce its mem_cost.  */
      COSTS (costs, num)->mem_cost
	-= ira_memory_move_cost[GET_MODE (reg)][cl][1] * frequency;
      record_address_regs (GET_MODE (SET_SRC (set)),
			   MEM_ADDR_SPACE (SET_SRC (set)),
			   XEXP (SET_SRC (set), 0), 0, MEM, SCRATCH,
			   frequency * 2);
      counted_mem = true;
    }

  record_operand_costs (insn, pref);

  /* Now add the cost for each operand to the total costs for its
     allocno.  */
  for (i = 0; i < recog_data.n_operands; i++)
    {
      rtx op = recog_data.operand[i];

      if (GET_CODE (op) == SUBREG)
	op = SUBREG_REG (op);
      if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
	{
	  int regno = REGNO (op);
	  struct costs *p = COSTS (costs, COST_INDEX (regno));
	  struct costs *q = op_costs[i];
	  int *p_costs = p->cost, *q_costs = q->cost;
	  cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
	  int add_cost;

	  /* If we already accounted for the memory "cost" above, don't
	     do so again.  */
	  if (!counted_mem)
	    {
	      add_cost = q->mem_cost;
	      if (add_cost > 0 && INT_MAX - add_cost < p->mem_cost)
		p->mem_cost = INT_MAX;
	      else
		p->mem_cost += add_cost;
	    }
	  /* Accumulate the per-class costs, saturating at INT_MAX.  */
	  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
	    {
	      add_cost = q_costs[k];
	      if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
		p_costs[k] = INT_MAX;
	      else
		p_costs[k] += add_cost;
	    }
	}
    }
  return insn;
}
1574
1575 \f
1576
1577 /* Print allocnos costs to file F. */
1578 static void
1579 print_allocno_costs (FILE *f)
1580 {
1581 int k;
1582 ira_allocno_t a;
1583 ira_allocno_iterator ai;
1584
1585 ira_assert (allocno_p);
1586 fprintf (f, "\n");
1587 FOR_EACH_ALLOCNO (a, ai)
1588 {
1589 int i, rclass;
1590 basic_block bb;
1591 int regno = ALLOCNO_REGNO (a);
1592 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1593 enum reg_class *cost_classes = cost_classes_ptr->classes;
1594
1595 i = ALLOCNO_NUM (a);
1596 fprintf (f, " a%d(r%d,", i, regno);
1597 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1598 fprintf (f, "b%d", bb->index);
1599 else
1600 fprintf (f, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1601 fprintf (f, ") costs:");
1602 for (k = 0; k < cost_classes_ptr->num; k++)
1603 {
1604 rclass = cost_classes[k];
1605 fprintf (f, " %s:%d", reg_class_names[rclass],
1606 COSTS (costs, i)->cost[k]);
1607 if (flag_ira_region == IRA_REGION_ALL
1608 || flag_ira_region == IRA_REGION_MIXED)
1609 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->cost[k]);
1610 }
1611 fprintf (f, " MEM:%i", COSTS (costs, i)->mem_cost);
1612 if (flag_ira_region == IRA_REGION_ALL
1613 || flag_ira_region == IRA_REGION_MIXED)
1614 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->mem_cost);
1615 fprintf (f, "\n");
1616 }
1617 }
1618
1619 /* Print pseudo costs to file F. */
1620 static void
1621 print_pseudo_costs (FILE *f)
1622 {
1623 int regno, k;
1624 int rclass;
1625 cost_classes_t cost_classes_ptr;
1626 enum reg_class *cost_classes;
1627
1628 ira_assert (! allocno_p);
1629 fprintf (f, "\n");
1630 for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
1631 {
1632 if (REG_N_REFS (regno) <= 0)
1633 continue;
1634 cost_classes_ptr = regno_cost_classes[regno];
1635 cost_classes = cost_classes_ptr->classes;
1636 fprintf (f, " r%d costs:", regno);
1637 for (k = 0; k < cost_classes_ptr->num; k++)
1638 {
1639 rclass = cost_classes[k];
1640 fprintf (f, " %s:%d", reg_class_names[rclass],
1641 COSTS (costs, regno)->cost[k]);
1642 }
1643 fprintf (f, " MEM:%i\n", COSTS (costs, regno)->mem_cost);
1644 }
1645 }
1646
/* Scan the insns of basic block BB to update the allocno (or pseudo)
   costs.  The block's execution frequency, clamped to at least 1,
   weights every cost recorded during the scan.  */
static void
process_bb_for_costs (basic_block bb)
{
  rtx_insn *insn;

  /* FREQUENCY is the file-scope variable read by scan_one_insn and
     the cost-recording helpers.  */
  frequency = REG_FREQ_FROM_BB (bb);
  if (frequency == 0)
    frequency = 1;
  FOR_BB_INSNS (bb, insn)
    insn = scan_one_insn (insn);
}
1660
1661 /* Traverse the BB represented by LOOP_TREE_NODE to update the allocno
1662 costs. */
1663 static void
1664 process_bb_node_for_costs (ira_loop_tree_node_t loop_tree_node)
1665 {
1666 basic_block bb;
1667
1668 bb = loop_tree_node->bb;
1669 if (bb != NULL)
1670 process_bb_for_costs (bb);
1671 }
1672
1673 /* Find costs of register classes and memory for allocnos or pseudos
1674 and their best costs. Set up preferred, alternative and allocno
1675 classes for pseudos. */
1676 static void
1677 find_costs_and_classes (FILE *dump_file)
1678 {
1679 int i, k, start, max_cost_classes_num;
1680 int pass;
1681 basic_block bb;
1682 enum reg_class *regno_best_class, new_class;
1683
1684 init_recog ();
1685 regno_best_class
1686 = (enum reg_class *) ira_allocate (max_reg_num ()
1687 * sizeof (enum reg_class));
1688 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1689 regno_best_class[i] = NO_REGS;
1690 if (!resize_reg_info () && allocno_p
1691 && pseudo_classes_defined_p && flag_expensive_optimizations)
1692 {
1693 ira_allocno_t a;
1694 ira_allocno_iterator ai;
1695
1696 pref = pref_buffer;
1697 max_cost_classes_num = 1;
1698 FOR_EACH_ALLOCNO (a, ai)
1699 {
1700 pref[ALLOCNO_NUM (a)] = reg_preferred_class (ALLOCNO_REGNO (a));
1701 setup_regno_cost_classes_by_aclass
1702 (ALLOCNO_REGNO (a), pref[ALLOCNO_NUM (a)]);
1703 max_cost_classes_num
1704 = MAX (max_cost_classes_num,
1705 regno_cost_classes[ALLOCNO_REGNO (a)]->num);
1706 }
1707 start = 1;
1708 }
1709 else
1710 {
1711 pref = NULL;
1712 max_cost_classes_num = ira_important_classes_num;
1713 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1714 if (regno_reg_rtx[i] != NULL_RTX)
1715 setup_regno_cost_classes_by_mode (i, PSEUDO_REGNO_MODE (i));
1716 else
1717 setup_regno_cost_classes_by_aclass (i, ALL_REGS);
1718 start = 0;
1719 }
1720 if (allocno_p)
1721 /* Clear the flag for the next compiled function. */
1722 pseudo_classes_defined_p = false;
1723 /* Normally we scan the insns once and determine the best class to
1724 use for each allocno. However, if -fexpensive-optimizations are
1725 on, we do so twice, the second time using the tentative best
1726 classes to guide the selection. */
1727 for (pass = start; pass <= flag_expensive_optimizations; pass++)
1728 {
1729 if ((!allocno_p || internal_flag_ira_verbose > 0) && dump_file)
1730 fprintf (dump_file,
1731 "\nPass %i for finding pseudo/allocno costs\n\n", pass);
1732
1733 if (pass != start)
1734 {
1735 max_cost_classes_num = 1;
1736 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1737 {
1738 setup_regno_cost_classes_by_aclass (i, regno_best_class[i]);
1739 max_cost_classes_num
1740 = MAX (max_cost_classes_num, regno_cost_classes[i]->num);
1741 }
1742 }
1743
1744 struct_costs_size
1745 = sizeof (struct costs) + sizeof (int) * (max_cost_classes_num - 1);
1746 /* Zero out our accumulation of the cost of each class for each
1747 allocno. */
1748 memset (costs, 0, cost_elements_num * struct_costs_size);
1749
1750 if (allocno_p)
1751 {
1752 /* Scan the instructions and record each time it would save code
1753 to put a certain allocno in a certain class. */
1754 ira_traverse_loop_tree (true, ira_loop_tree_root,
1755 process_bb_node_for_costs, NULL);
1756
1757 memcpy (total_allocno_costs, costs,
1758 max_struct_costs_size * ira_allocnos_num);
1759 }
1760 else
1761 {
1762 basic_block bb;
1763
1764 FOR_EACH_BB_FN (bb, cfun)
1765 process_bb_for_costs (bb);
1766 }
1767
1768 if (pass == 0)
1769 pref = pref_buffer;
1770
1771 /* Now for each allocno look at how desirable each class is and
1772 find which class is preferred. */
1773 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1774 {
1775 ira_allocno_t a, parent_a;
1776 int rclass, a_num, parent_a_num, add_cost;
1777 ira_loop_tree_node_t parent;
1778 int best_cost, allocno_cost;
1779 enum reg_class best, alt_class;
1780 cost_classes_t cost_classes_ptr = regno_cost_classes[i];
1781 enum reg_class *cost_classes;
1782 int *i_costs = temp_costs->cost;
1783 int i_mem_cost;
1784 int equiv_savings = regno_equiv_gains[i];
1785
1786 if (! allocno_p)
1787 {
1788 if (regno_reg_rtx[i] == NULL_RTX)
1789 continue;
1790 memcpy (temp_costs, COSTS (costs, i), struct_costs_size);
1791 i_mem_cost = temp_costs->mem_cost;
1792 cost_classes = cost_classes_ptr->classes;
1793 }
1794 else
1795 {
1796 if (ira_regno_allocno_map[i] == NULL)
1797 continue;
1798 memset (temp_costs, 0, struct_costs_size);
1799 i_mem_cost = 0;
1800 cost_classes = cost_classes_ptr->classes;
1801 /* Find cost of all allocnos with the same regno. */
1802 for (a = ira_regno_allocno_map[i];
1803 a != NULL;
1804 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1805 {
1806 int *a_costs, *p_costs;
1807
1808 a_num = ALLOCNO_NUM (a);
1809 if ((flag_ira_region == IRA_REGION_ALL
1810 || flag_ira_region == IRA_REGION_MIXED)
1811 && (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
1812 && (parent_a = parent->regno_allocno_map[i]) != NULL
1813 /* There are no caps yet. */
1814 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE
1815 (a)->border_allocnos,
1816 ALLOCNO_NUM (a)))
1817 {
1818 /* Propagate costs to upper levels in the region
1819 tree. */
1820 parent_a_num = ALLOCNO_NUM (parent_a);
1821 a_costs = COSTS (total_allocno_costs, a_num)->cost;
1822 p_costs = COSTS (total_allocno_costs, parent_a_num)->cost;
1823 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1824 {
1825 add_cost = a_costs[k];
1826 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1827 p_costs[k] = INT_MAX;
1828 else
1829 p_costs[k] += add_cost;
1830 }
1831 add_cost = COSTS (total_allocno_costs, a_num)->mem_cost;
1832 if (add_cost > 0
1833 && (INT_MAX - add_cost
1834 < COSTS (total_allocno_costs,
1835 parent_a_num)->mem_cost))
1836 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1837 = INT_MAX;
1838 else
1839 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1840 += add_cost;
1841
1842 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1843 COSTS (total_allocno_costs, parent_a_num)->mem_cost = 0;
1844 }
1845 a_costs = COSTS (costs, a_num)->cost;
1846 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1847 {
1848 add_cost = a_costs[k];
1849 if (add_cost > 0 && INT_MAX - add_cost < i_costs[k])
1850 i_costs[k] = INT_MAX;
1851 else
1852 i_costs[k] += add_cost;
1853 }
1854 add_cost = COSTS (costs, a_num)->mem_cost;
1855 if (add_cost > 0 && INT_MAX - add_cost < i_mem_cost)
1856 i_mem_cost = INT_MAX;
1857 else
1858 i_mem_cost += add_cost;
1859 }
1860 }
1861 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1862 i_mem_cost = 0;
1863 else if (equiv_savings < 0)
1864 i_mem_cost = -equiv_savings;
1865 else if (equiv_savings > 0)
1866 {
1867 i_mem_cost = 0;
1868 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1869 i_costs[k] += equiv_savings;
1870 }
1871
1872 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1873 best = ALL_REGS;
1874 alt_class = NO_REGS;
1875 /* Find best common class for all allocnos with the same
1876 regno. */
1877 for (k = 0; k < cost_classes_ptr->num; k++)
1878 {
1879 rclass = cost_classes[k];
1880 if (i_costs[k] < best_cost)
1881 {
1882 best_cost = i_costs[k];
1883 best = (enum reg_class) rclass;
1884 }
1885 else if (i_costs[k] == best_cost)
1886 best = ira_reg_class_subunion[best][rclass];
1887 if (pass == flag_expensive_optimizations
1888 /* We still prefer registers to memory even at this
1889 stage if their costs are the same. We will make
1890 a final decision during assigning hard registers
1891 when we have all info including more accurate
1892 costs which might be affected by assigning hard
1893 registers to other pseudos because the pseudos
1894 involved in moves can be coalesced. */
1895 && i_costs[k] <= i_mem_cost
1896 && (reg_class_size[reg_class_subunion[alt_class][rclass]]
1897 > reg_class_size[alt_class]))
1898 alt_class = reg_class_subunion[alt_class][rclass];
1899 }
1900 alt_class = ira_allocno_class_translate[alt_class];
1901 if (best_cost > i_mem_cost
1902 && ! non_spilled_static_chain_regno_p (i))
1903 regno_aclass[i] = NO_REGS;
1904 else if (!optimize && !targetm.class_likely_spilled_p (best))
1905 /* Registers in the alternative class are likely to need
1906 longer or slower sequences than registers in the best class.
1907 When optimizing we make some effort to use the best class
1908 over the alternative class where possible, but at -O0 we
1909 effectively give the alternative class equal weight.
1910 We then run the risk of using slower alternative registers
1911 when plenty of registers from the best class are still free.
1912 This is especially true because live ranges tend to be very
1913 short in -O0 code and so register pressure tends to be low.
1914
1915 Avoid that by ignoring the alternative class if the best
1916 class has plenty of registers.
1917
1918 The union class arrays give important classes and only
1919 part of it are allocno classes. So translate them into
1920 allocno classes. */
1921 regno_aclass[i] = ira_allocno_class_translate[best];
1922 else
1923 {
1924 /* Make the common class the biggest class of best and
1925 alt_class. Translate the common class into an
1926 allocno class too. */
1927 regno_aclass[i] = (ira_allocno_class_translate
1928 [ira_reg_class_superunion[best][alt_class]]);
1929 ira_assert (regno_aclass[i] != NO_REGS
1930 && ira_reg_allocno_class_p[regno_aclass[i]]);
1931 }
1932 if ((new_class
1933 = (reg_class) (targetm.ira_change_pseudo_allocno_class
1934 (i, regno_aclass[i], best))) != regno_aclass[i])
1935 {
1936 regno_aclass[i] = new_class;
1937 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1938 reg_class_contents[best]))
1939 best = new_class;
1940 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1941 reg_class_contents[alt_class]))
1942 alt_class = new_class;
1943 }
1944 if (pass == flag_expensive_optimizations)
1945 {
1946 if (best_cost > i_mem_cost
1947 /* Do not assign NO_REGS to static chain pointer
1948 pseudo when non-local goto is used. */
1949 && ! non_spilled_static_chain_regno_p (i))
1950 best = alt_class = NO_REGS;
1951 else if (best == alt_class)
1952 alt_class = NO_REGS;
1953 setup_reg_classes (i, best, alt_class, regno_aclass[i]);
1954 if ((!allocno_p || internal_flag_ira_verbose > 2)
1955 && dump_file != NULL)
1956 fprintf (dump_file,
1957 " r%d: preferred %s, alternative %s, allocno %s\n",
1958 i, reg_class_names[best], reg_class_names[alt_class],
1959 reg_class_names[regno_aclass[i]]);
1960 }
1961 regno_best_class[i] = best;
1962 if (! allocno_p)
1963 {
1964 pref[i] = (best_cost > i_mem_cost
1965 && ! non_spilled_static_chain_regno_p (i)
1966 ? NO_REGS : best);
1967 continue;
1968 }
1969 for (a = ira_regno_allocno_map[i];
1970 a != NULL;
1971 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1972 {
1973 enum reg_class aclass = regno_aclass[i];
1974 int a_num = ALLOCNO_NUM (a);
1975 int *total_a_costs = COSTS (total_allocno_costs, a_num)->cost;
1976 int *a_costs = COSTS (costs, a_num)->cost;
1977
1978 if (aclass == NO_REGS)
1979 best = NO_REGS;
1980 else
1981 {
1982 /* Finding best class which is subset of the common
1983 class. */
1984 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1985 allocno_cost = best_cost;
1986 best = ALL_REGS;
1987 for (k = 0; k < cost_classes_ptr->num; k++)
1988 {
1989 rclass = cost_classes[k];
1990 if (! ira_class_subset_p[rclass][aclass])
1991 continue;
1992 if (total_a_costs[k] < best_cost)
1993 {
1994 best_cost = total_a_costs[k];
1995 allocno_cost = a_costs[k];
1996 best = (enum reg_class) rclass;
1997 }
1998 else if (total_a_costs[k] == best_cost)
1999 {
2000 best = ira_reg_class_subunion[best][rclass];
2001 allocno_cost = MAX (allocno_cost, a_costs[k]);
2002 }
2003 }
2004 ALLOCNO_CLASS_COST (a) = allocno_cost;
2005 }
2006 if (internal_flag_ira_verbose > 2 && dump_file != NULL
2007 && (pass == 0 || pref[a_num] != best))
2008 {
2009 fprintf (dump_file, " a%d (r%d,", a_num, i);
2010 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
2011 fprintf (dump_file, "b%d", bb->index);
2012 else
2013 fprintf (dump_file, "l%d",
2014 ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
2015 fprintf (dump_file, ") best %s, allocno %s\n",
2016 reg_class_names[best],
2017 reg_class_names[aclass]);
2018 }
2019 pref[a_num] = best;
2020 if (pass == flag_expensive_optimizations && best != aclass
2021 && ira_class_hard_regs_num[best] > 0
2022 && (ira_reg_class_max_nregs[best][ALLOCNO_MODE (a)]
2023 >= ira_class_hard_regs_num[best]))
2024 {
2025 int ind = cost_classes_ptr->index[aclass];
2026
2027 ira_assert (ind >= 0);
2028 ira_init_register_move_cost_if_necessary (ALLOCNO_MODE (a));
2029 ira_add_allocno_pref (a, ira_class_hard_regs[best][0],
2030 (a_costs[ind] - ALLOCNO_CLASS_COST (a))
2031 / (ira_register_move_cost
2032 [ALLOCNO_MODE (a)][best][aclass]));
2033 for (k = 0; k < cost_classes_ptr->num; k++)
2034 if (ira_class_subset_p[cost_classes[k]][best])
2035 a_costs[k] = a_costs[ind];
2036 }
2037 }
2038 }
2039
2040 if (internal_flag_ira_verbose > 4 && dump_file)
2041 {
2042 if (allocno_p)
2043 print_allocno_costs (dump_file);
2044 else
2045 print_pseudo_costs (dump_file);
2046 fprintf (dump_file,"\n");
2047 }
2048 }
2049 ira_free (regno_best_class);
2050 }
2051
2052 \f
2053
/* Process moves involving hard regs to modify allocno hard register
   costs.  We can do this only after determining allocno class.  If a
   hard register forms a register class, then moves with the hard
   register are already taken into account in class costs for the
   allocno.  */
static void
process_bb_node_for_hard_reg_moves (ira_loop_tree_node_t loop_tree_node)
{
  int i, freq, src_regno, dst_regno, hard_regno, a_regno;
  bool to_p;
  ira_allocno_t a, curr_a;
  ira_loop_tree_node_t curr_loop_tree_node;
  enum reg_class rclass;
  basic_block bb;
  rtx_insn *insn;
  rtx set, src, dst;

  /* Only loop-tree nodes that represent a basic block are scanned.  */
  bb = loop_tree_node->bb;
  if (bb == NULL)
    return;
  /* Weight all adjustments by the block's execution frequency; use 1
     as a floor so a move never has zero weight.  */
  freq = REG_FREQ_FROM_BB (bb);
  if (freq == 0)
    freq = 1;
  FOR_BB_INSNS (bb, insn)
    {
      if (!NONDEBUG_INSN_P (insn))
	continue;
      /* Only simple single-set register-to-register copies matter.  */
      set = single_set (insn);
      if (set == NULL_RTX)
	continue;
      dst = SET_DEST (set);
      src = SET_SRC (set);
      if (! REG_P (dst) || ! REG_P (src))
	continue;
      dst_regno = REGNO (dst);
      src_regno = REGNO (src);
      /* We need exactly one hard register and one pseudo; remember
	 the direction of the copy in TO_P.  */
      if (dst_regno >= FIRST_PSEUDO_REGISTER
	  && src_regno < FIRST_PSEUDO_REGISTER)
	{
	  /* Copy from a hard register into a pseudo.  */
	  hard_regno = src_regno;
	  a = ira_curr_regno_allocno_map[dst_regno];
	  to_p = true;
	}
      else if (src_regno >= FIRST_PSEUDO_REGISTER
	       && dst_regno < FIRST_PSEUDO_REGISTER)
	{
	  /* Copy from a pseudo into a hard register.  */
	  hard_regno = dst_regno;
	  a = ira_curr_regno_allocno_map[src_regno];
	  to_p = false;
	}
      else
	continue;
      rclass = ALLOCNO_CLASS (a);
      /* Ignore hard registers the allocno can never get.  */
      if (! TEST_HARD_REG_BIT (reg_class_contents[rclass], hard_regno))
	continue;
      i = ira_class_hard_reg_index[rclass][hard_regno];
      if (i < 0)
	continue;
      a_regno = ALLOCNO_REGNO (a);
      /* Record a preference for HARD_REGNO on this allocno and on the
	 allocnos of the same pseudo in all enclosing loop nodes.  */
      for (curr_loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
	   curr_loop_tree_node != NULL;
	   curr_loop_tree_node = curr_loop_tree_node->parent)
	if ((curr_a = curr_loop_tree_node->regno_allocno_map[a_regno]) != NULL)
	  ira_add_allocno_pref (curr_a, hard_regno, freq);
      {
	int cost;
	enum reg_class hard_reg_class;
	machine_mode mode;

	mode = ALLOCNO_MODE (a);
	hard_reg_class = REGNO_REG_CLASS (hard_regno);
	ira_init_register_move_cost_if_necessary (mode);
	/* Assigning HARD_REGNO to the allocno would make this move a
	   no-op, so its cost counts as a saving for that register.  */
	cost = (to_p ? ira_register_move_cost[mode][hard_reg_class][rclass]
		: ira_register_move_cost[mode][rclass][hard_reg_class]) * freq;
	/* Make sure the cost vectors exist before adjusting them.  */
	ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), rclass,
				    ALLOCNO_CLASS_COST (a));
	ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				    rclass, 0);
	ALLOCNO_HARD_REG_COSTS (a)[i] -= cost;
	ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[i] -= cost;
	/* Keep the class cost no larger than the cheapest hard
	   register cost.  */
	ALLOCNO_CLASS_COST (a) = MIN (ALLOCNO_CLASS_COST (a),
				      ALLOCNO_HARD_REG_COSTS (a)[i]);
      }
    }
}
2139
2140 /* After we find hard register and memory costs for allocnos, define
2141 its class and modify hard register cost because insns moving
2142 allocno to/from hard registers. */
2143 static void
2144 setup_allocno_class_and_costs (void)
2145 {
2146 int i, j, n, regno, hard_regno, num;
2147 int *reg_costs;
2148 enum reg_class aclass, rclass;
2149 ira_allocno_t a;
2150 ira_allocno_iterator ai;
2151 cost_classes_t cost_classes_ptr;
2152
2153 ira_assert (allocno_p);
2154 FOR_EACH_ALLOCNO (a, ai)
2155 {
2156 i = ALLOCNO_NUM (a);
2157 regno = ALLOCNO_REGNO (a);
2158 aclass = regno_aclass[regno];
2159 cost_classes_ptr = regno_cost_classes[regno];
2160 ira_assert (pref[i] == NO_REGS || aclass != NO_REGS);
2161 ALLOCNO_MEMORY_COST (a) = COSTS (costs, i)->mem_cost;
2162 ira_set_allocno_class (a, aclass);
2163 if (aclass == NO_REGS)
2164 continue;
2165 if (optimize && ALLOCNO_CLASS (a) != pref[i])
2166 {
2167 n = ira_class_hard_regs_num[aclass];
2168 ALLOCNO_HARD_REG_COSTS (a)
2169 = reg_costs = ira_allocate_cost_vector (aclass);
2170 for (j = n - 1; j >= 0; j--)
2171 {
2172 hard_regno = ira_class_hard_regs[aclass][j];
2173 if (TEST_HARD_REG_BIT (reg_class_contents[pref[i]], hard_regno))
2174 reg_costs[j] = ALLOCNO_CLASS_COST (a);
2175 else
2176 {
2177 rclass = REGNO_REG_CLASS (hard_regno);
2178 num = cost_classes_ptr->index[rclass];
2179 if (num < 0)
2180 {
2181 num = cost_classes_ptr->hard_regno_index[hard_regno];
2182 ira_assert (num >= 0);
2183 }
2184 reg_costs[j] = COSTS (costs, i)->cost[num];
2185 }
2186 }
2187 }
2188 }
2189 if (optimize)
2190 ira_traverse_loop_tree (true, ira_loop_tree_root,
2191 process_bb_node_for_hard_reg_moves, NULL);
2192 }
2193
2194 \f
2195
2196 /* Function called once during compiler work. */
2197 void
2198 ira_init_costs_once (void)
2199 {
2200 int i;
2201
2202 init_cost = NULL;
2203 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2204 {
2205 op_costs[i] = NULL;
2206 this_op_costs[i] = NULL;
2207 }
2208 temp_costs = NULL;
2209 }
2210
2211 /* Free allocated temporary cost vectors. */
2212 void
2213 target_ira_int::free_ira_costs ()
2214 {
2215 int i;
2216
2217 free (x_init_cost);
2218 x_init_cost = NULL;
2219 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2220 {
2221 free (x_op_costs[i]);
2222 free (x_this_op_costs[i]);
2223 x_op_costs[i] = x_this_op_costs[i] = NULL;
2224 }
2225 free (x_temp_costs);
2226 x_temp_costs = NULL;
2227 }
2228
2229 /* This is called each time register related information is
2230 changed. */
2231 void
2232 ira_init_costs (void)
2233 {
2234 int i;
2235
2236 this_target_ira_int->free_ira_costs ();
2237 max_struct_costs_size
2238 = sizeof (struct costs) + sizeof (int) * (ira_important_classes_num - 1);
2239 /* Don't use ira_allocate because vectors live through several IRA
2240 calls. */
2241 init_cost = (struct costs *) xmalloc (max_struct_costs_size);
2242 init_cost->mem_cost = 1000000;
2243 for (i = 0; i < ira_important_classes_num; i++)
2244 init_cost->cost[i] = 1000000;
2245 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2246 {
2247 op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2248 this_op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2249 }
2250 temp_costs = (struct costs *) xmalloc (max_struct_costs_size);
2251 }
2252
2253 \f
2254
2255 /* Common initialization function for ira_costs and
2256 ira_set_pseudo_classes. */
2257 static void
2258 init_costs (void)
2259 {
2260 init_subregs_of_mode ();
2261 costs = (struct costs *) ira_allocate (max_struct_costs_size
2262 * cost_elements_num);
2263 pref_buffer = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2264 * cost_elements_num);
2265 regno_aclass = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2266 * max_reg_num ());
2267 regno_equiv_gains = (int *) ira_allocate (sizeof (int) * max_reg_num ());
2268 memset (regno_equiv_gains, 0, sizeof (int) * max_reg_num ());
2269 }
2270
2271 /* Common finalization function for ira_costs and
2272 ira_set_pseudo_classes. */
2273 static void
2274 finish_costs (void)
2275 {
2276 finish_subregs_of_mode ();
2277 ira_free (regno_equiv_gains);
2278 ira_free (regno_aclass);
2279 ira_free (pref_buffer);
2280 ira_free (costs);
2281 }
2282
/* Entry function which defines register class, memory and hard
   register costs for each allocno.  */
void
ira_costs (void)
{
  /* Work in allocno mode (as opposed to the pseudo mode used by
     ira_set_pseudo_classes).  */
  allocno_p = true;
  cost_elements_num = ira_allocnos_num;
  init_costs ();
  /* Costs accumulated over the whole loop tree, kept separately from
     the per-region COSTS array.  */
  total_allocno_costs = (struct costs *) ira_allocate (max_struct_costs_size
						       * ira_allocnos_num);
  initiate_regno_cost_classes ();
  calculate_elim_costs_all_insns ();
  find_costs_and_classes (ira_dump_file);
  setup_allocno_class_and_costs ();
  finish_regno_cost_classes ();
  finish_costs ();
  ira_free (total_allocno_costs);
}
2301
/* Entry function which defines classes for pseudos.
   Set pseudo_classes_defined_p only if DEFINE_PSEUDO_CLASSES is true.
   Dump diagnostics to DUMP_FILE when verbosity permits.  */
void
ira_set_pseudo_classes (bool define_pseudo_classes, FILE *dump_file)
{
  /* Work on raw pseudos rather than allocnos.  */
  allocno_p = false;
  internal_flag_ira_verbose = flag_ira_verbose;
  cost_elements_num = max_reg_num ();
  init_costs ();
  initiate_regno_cost_classes ();
  find_costs_and_classes (dump_file);
  finish_regno_cost_classes ();
  if (define_pseudo_classes)
    pseudo_classes_defined_p = true;

  finish_costs ();
}
2319
2320 \f
2321
/* Change hard register costs for allocnos which live through
   function calls.  This is called only after we have found all
   intersected calls during building allocno live ranges.  */
void
ira_tune_allocno_costs (void)
{
  int j, n, regno;
  int cost, min_cost, *reg_costs;
  enum reg_class aclass, rclass;
  machine_mode mode;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_allocno_object_iterator oi;
  ira_object_t obj;
  bool skip_p;
  HARD_REG_SET *crossed_calls_clobber_regs;

  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      mode = ALLOCNO_MODE (a);
      n = ira_class_hard_regs_num[aclass];
      min_cost = INT_MAX;
      /* Penalize call-clobbered registers only when the allocno
	 crosses at least one call that is not considered cheap.  */
      if (ALLOCNO_CALLS_CROSSED_NUM (a)
	  != ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
	{
	  ira_allocate_and_set_costs
	    (&ALLOCNO_HARD_REG_COSTS (a), aclass,
	     ALLOCNO_CLASS_COST (a));
	  reg_costs = ALLOCNO_HARD_REG_COSTS (a);
	  for (j = n - 1; j >= 0; j--)
	    {
	      regno = ira_class_hard_regs[aclass][j];
	      /* Skip registers that conflict with any object of the
		 allocno; they can never be assigned anyway.  */
	      skip_p = false;
	      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
		{
		  if (ira_hard_reg_set_intersection_p (regno, mode,
						       OBJECT_CONFLICT_HARD_REGS
						       (obj)))
		    {
		      skip_p = true;
		      break;
		    }
		}
	      if (skip_p)
		continue;
	      rclass = REGNO_REG_CLASS (regno);
	      cost = 0;
	      crossed_calls_clobber_regs
		= &(ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
	      /* If the register is (even partly) clobbered by a
		 crossed call, using it implies a save and a restore
		 around each call; charge a memory round trip scaled
		 by the call frequency.  */
	      if (ira_hard_reg_set_intersection_p (regno, mode,
						   *crossed_calls_clobber_regs)
		  && (ira_hard_reg_set_intersection_p (regno, mode,
						       call_used_reg_set)
		      || targetm.hard_regno_call_part_clobbered (regno,
								 mode)))
		cost += (ALLOCNO_CALL_FREQ (a)
			 * (ira_memory_move_cost[mode][rclass][0]
			    + ira_memory_move_cost[mode][rclass][1]));
#ifdef IRA_HARD_REGNO_ADD_COST_MULTIPLIER
	      /* Target-specific extra penalty for this hard register.  */
	      cost += ((ira_memory_move_cost[mode][rclass][0]
			+ ira_memory_move_cost[mode][rclass][1])
		       * ALLOCNO_FREQ (a)
		       * IRA_HARD_REGNO_ADD_COST_MULTIPLIER (regno) / 2);
#endif
	      /* Saturate instead of overflowing the int cost.  */
	      if (INT_MAX - cost < reg_costs[j])
		reg_costs[j] = INT_MAX;
	      else
		reg_costs[j] += cost;
	      if (min_cost > reg_costs[j])
		min_cost = reg_costs[j];
	    }
	}
      /* The class cost must track the cheapest hard register cost.  */
      if (min_cost != INT_MAX)
	ALLOCNO_CLASS_COST (a) = min_cost;

      /* Some targets allow pseudos to be allocated to unaligned sequences
	 of hard registers.  However, selecting an unaligned sequence can
	 unnecessarily restrict later allocations.  So increase the cost of
	 unaligned hard regs to encourage the use of aligned hard regs.  */
      {
	const int nregs = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];

	if (nregs > 1)
	  {
	    ira_allocate_and_set_costs
	      (&ALLOCNO_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a));
	    reg_costs = ALLOCNO_HARD_REG_COSTS (a);
	    for (j = n - 1; j >= 0; j--)
	      {
		regno = ira_non_ordered_class_hard_regs[aclass][j];
		if ((regno % nregs) != 0)
		  {
		    int index = ira_class_hard_reg_index[aclass][regno];
		    ira_assert (index != -1);
		    /* Mildly penalize registers that start an
		       unaligned NREGS-sized sequence.  */
		    reg_costs[index] += ALLOCNO_FREQ (a);
		  }
	      }
	  }
      }
    }
}
2426
2427 /* Add COST to the estimated gain for eliminating REGNO with its
2428 equivalence. If COST is zero, record that no such elimination is
2429 possible. */
2430
2431 void
2432 ira_adjust_equiv_reg_cost (unsigned regno, int cost)
2433 {
2434 if (cost == 0)
2435 regno_equiv_gains[regno] = 0;
2436 else
2437 regno_equiv_gains[regno] += cost;
2438 }
2439
/* Release the cost vectors owned by the current target's IRA data;
   delegates to target_ira_int::free_ira_costs.  */
void
ira_costs_c_finalize (void)
{
  this_target_ira_int->free_ira_costs ();
}