gcc/ira-costs.c
1 /* IRA hard register and memory cost calculation for allocnos or pseudos.
2 Copyright (C) 2006-2019 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "tm_p.h"
31 #include "insn-config.h"
32 #include "regs.h"
33 #include "ira.h"
34 #include "ira-int.h"
35 #include "addresses.h"
36 #include "reload.h"
37
38 /* The flag is set up every time we calculate pseudo register
39 classes through function ira_set_pseudo_classes. */
40 static bool pseudo_classes_defined_p = false;
41
42 /* TRUE if we work with allocnos. Otherwise we work with pseudos. */
43 static bool allocno_p;
44
45 /* Number of elements in array `costs'. */
46 static int cost_elements_num;
47
48 /* The `costs' struct records the cost of using hard registers of each
49 class considered for the calculation and of using memory for each
50 allocno or pseudo. */
51 struct costs
52 {
53 int mem_cost;
54 /* Costs for register classes start here. We process only some
55 allocno classes. */
56 int cost[1];
57 };
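/* Editor's note (illustrative, not part of the original source): cost[1]
   is a C89-style trailing array.  Each record really holds one int per
   cost class; find_costs_and_classes later sizes the records as

     struct_costs_size
       = sizeof (struct costs) + sizeof (int) * (max_cost_classes_num - 1);

   and the COSTS macro below indexes the flat buffer by byte offset.  */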
58
59 #define max_struct_costs_size \
60 (this_target_ira_int->x_max_struct_costs_size)
61 #define init_cost \
62 (this_target_ira_int->x_init_cost)
63 #define temp_costs \
64 (this_target_ira_int->x_temp_costs)
65 #define op_costs \
66 (this_target_ira_int->x_op_costs)
67 #define this_op_costs \
68 (this_target_ira_int->x_this_op_costs)
69
70 /* Costs of each class for each allocno or pseudo. */
71 static struct costs *costs;
72
73 /* Accumulated costs of each class for each allocno. */
74 static struct costs *total_allocno_costs;
75
76 /* The current size of struct costs. */
77 static size_t struct_costs_size;
78
79 /* Return pointer to structure containing costs of allocno or pseudo
80 with given NUM in array ARR. */
81 #define COSTS(arr, num) \
82 ((struct costs *) ((char *) (arr) + (num) * struct_costs_size))
83
84 /* Return index in COSTS when processing reg with REGNO. */
85 #define COST_INDEX(regno) (allocno_p \
86 ? ALLOCNO_NUM (ira_curr_regno_allocno_map[regno]) \
87 : (int) regno)
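/* Illustrative usage (a sketch mirroring the uses later in this file,
   e.g. in record_address_regs and scan_one_insn):

     struct costs *p = COSTS (costs, COST_INDEX (regno));
     p->mem_cost += add_cost;

   With allocno_p set, COST_INDEX maps REGNO to its allocno number;
   otherwise the pseudo register number is used directly.  */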
88
89 /* Record register class preferences of each allocno or pseudo. A null
90 value means no preferences; this happens on the 1st iteration of the
91 cost calculation. */
92 static enum reg_class *pref;
93
94 /* Allocated buffer for pref. */
95 static enum reg_class *pref_buffer;
96
97 /* Record allocno class of each allocno with the same regno. */
98 static enum reg_class *regno_aclass;
99
100 /* Record cost gains for not allocating a register with an invariant
101 equivalence. */
102 static int *regno_equiv_gains;
103
104 /* Execution frequency of the current insn. */
105 static int frequency;
106
107 \f
108
109 /* Info about reg classes whose costs are calculated for a pseudo. */
110 struct cost_classes
111 {
112 /* Number of the cost classes in the subsequent array. */
113 int num;
114 /* Container of the cost classes. */
115 enum reg_class classes[N_REG_CLASSES];
116 /* Map reg class -> index of the reg class in the previous array.
117 -1 if it is not a cost class. */
118 int index[N_REG_CLASSES];
119 /* Map hard regno -> index of the first class in array CLASSES containing
120 the hard regno, -1 otherwise. */
121 int hard_regno_index[FIRST_PSEUDO_REGISTER];
122 };
123
124 /* Types of pointers to the structure above. */
125 typedef struct cost_classes *cost_classes_t;
126 typedef const struct cost_classes *const_cost_classes_t;
127
128 /* Info about cost classes for each pseudo. */
129 static cost_classes_t *regno_cost_classes;
130
131 /* Helper for cost_classes hashing. */
132
133 struct cost_classes_hasher : pointer_hash <cost_classes>
134 {
135 static inline hashval_t hash (const cost_classes *);
136 static inline bool equal (const cost_classes *, const cost_classes *);
137 static inline void remove (cost_classes *);
138 };
139
140 /* Returns hash value for cost classes info HV. */
141 inline hashval_t
142 cost_classes_hasher::hash (const cost_classes *hv)
143 {
144 return iterative_hash (&hv->classes, sizeof (enum reg_class) * hv->num, 0);
145 }
146
147 /* Compares cost classes info HV1 and HV2. */
148 inline bool
149 cost_classes_hasher::equal (const cost_classes *hv1, const cost_classes *hv2)
150 {
151 return (hv1->num == hv2->num
152 && memcmp (hv1->classes, hv2->classes,
153 sizeof (enum reg_class) * hv1->num) == 0);
154 }
155
156 /* Delete cost classes info V from the hash table. */
157 inline void
158 cost_classes_hasher::remove (cost_classes *v)
159 {
160 ira_free (v);
161 }
162
163 /* Hash table of unique cost classes. */
164 static hash_table<cost_classes_hasher> *cost_classes_htab;
165
166 /* Map allocno class -> cost classes for pseudo of given allocno
167 class. */
168 static cost_classes_t cost_classes_aclass_cache[N_REG_CLASSES];
169
170 /* Map mode -> cost classes for a pseudo of the given mode. */
171 static cost_classes_t cost_classes_mode_cache[MAX_MACHINE_MODE];
172
173 /* Cost classes that include all classes in ira_important_classes. */
174 static cost_classes all_cost_classes;
175
176 /* Use the array of classes in CLASSES_PTR to fill out the rest of
177 the structure. */
178 static void
179 complete_cost_classes (cost_classes_t classes_ptr)
180 {
181 for (int i = 0; i < N_REG_CLASSES; i++)
182 classes_ptr->index[i] = -1;
183 for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
184 classes_ptr->hard_regno_index[i] = -1;
185 for (int i = 0; i < classes_ptr->num; i++)
186 {
187 enum reg_class cl = classes_ptr->classes[i];
188 classes_ptr->index[cl] = i;
189 for (int j = ira_class_hard_regs_num[cl] - 1; j >= 0; j--)
190 {
191 unsigned int hard_regno = ira_class_hard_regs[cl][j];
192 if (classes_ptr->hard_regno_index[hard_regno] < 0)
193 classes_ptr->hard_regno_index[hard_regno] = i;
194 }
195 }
196 }
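/* Worked example (hypothetical target and classes, for illustration
   only): if CLASSES_PTR->classes is { GENERAL_REGS, FLOAT_REGS }, the
   loops above produce

     index[GENERAL_REGS] = 0, index[FLOAT_REGS] = 1, all others = -1;
     hard_regno_index[r] = 0 for any hard reg r in GENERAL_REGS,
                           1 for regs only in FLOAT_REGS, -1 otherwise;

   i.e. each hard register listed in ira_class_hard_regs maps to the
   first cost class in CLASSES that contains it.  */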
197
198 /* Initialize info about the cost classes for each pseudo. */
199 static void
200 initiate_regno_cost_classes (void)
201 {
202 int size = sizeof (cost_classes_t) * max_reg_num ();
203
204 regno_cost_classes = (cost_classes_t *) ira_allocate (size);
205 memset (regno_cost_classes, 0, size);
206 memset (cost_classes_aclass_cache, 0,
207 sizeof (cost_classes_t) * N_REG_CLASSES);
208 memset (cost_classes_mode_cache, 0,
209 sizeof (cost_classes_t) * MAX_MACHINE_MODE);
210 cost_classes_htab = new hash_table<cost_classes_hasher> (200);
211 all_cost_classes.num = ira_important_classes_num;
212 for (int i = 0; i < ira_important_classes_num; i++)
213 all_cost_classes.classes[i] = ira_important_classes[i];
214 complete_cost_classes (&all_cost_classes);
215 }
216
217 /* Create new cost classes from cost classes FROM and set up members
218 index and hard_regno_index. Return the new classes. The function
219 implements some common code of two functions
220 setup_regno_cost_classes_by_aclass and
221 setup_regno_cost_classes_by_mode. */
222 static cost_classes_t
223 setup_cost_classes (cost_classes_t from)
224 {
225 cost_classes_t classes_ptr;
226
227 classes_ptr = (cost_classes_t) ira_allocate (sizeof (struct cost_classes));
228 classes_ptr->num = from->num;
229 for (int i = 0; i < from->num; i++)
230 classes_ptr->classes[i] = from->classes[i];
231 complete_cost_classes (classes_ptr);
232 return classes_ptr;
233 }
234
235 /* Return a version of FULL that only considers registers in REGS that are
236 valid for mode MODE. Both FULL and the returned class are globally
237 allocated. */
238 static cost_classes_t
239 restrict_cost_classes (cost_classes_t full, machine_mode mode,
240 const HARD_REG_SET &regs)
241 {
242 static struct cost_classes narrow;
243 int map[N_REG_CLASSES];
244 narrow.num = 0;
245 for (int i = 0; i < full->num; i++)
246 {
247 /* Assume that we'll drop the class. */
248 map[i] = -1;
249
250 /* Ignore classes that are too small for the mode. */
251 enum reg_class cl = full->classes[i];
252 if (!contains_reg_of_mode[cl][mode])
253 continue;
254
255 /* Calculate the set of registers in CL that belong to REGS and
256 are valid for MODE. */
257 HARD_REG_SET valid_for_cl;
258 COPY_HARD_REG_SET (valid_for_cl, reg_class_contents[cl]);
259 AND_HARD_REG_SET (valid_for_cl, regs);
260 AND_COMPL_HARD_REG_SET (valid_for_cl,
261 ira_prohibited_class_mode_regs[cl][mode]);
262 AND_COMPL_HARD_REG_SET (valid_for_cl, ira_no_alloc_regs);
263 if (hard_reg_set_empty_p (valid_for_cl))
264 continue;
265
266 /* Don't use this class if the set of valid registers is a subset
267 of an existing class. For example, suppose we have two classes
268 GR_REGS and FR_REGS and a union class GR_AND_FR_REGS. Suppose
269 that the mode changes allowed by FR_REGS are not as general as
270 the mode changes allowed by GR_REGS.
271
272 In this situation, the mode changes for GR_AND_FR_REGS could
273 either be seen as the union or the intersection of the mode
274 changes allowed by the two subclasses. The justification for
275 the union-based definition would be that, if you want a mode
276 change that's only allowed by GR_REGS, you can pick a register
277 from the GR_REGS subclass. The justification for the
278 intersection-based definition would be that every register
279 from the class would allow the mode change.
280
281 However, if we have a register that needs to be in GR_REGS,
282 using GR_AND_FR_REGS with the intersection-based definition
283 would be too pessimistic, since it would bring in restrictions
284 that only apply to FR_REGS. Conversely, if we have a register
285 that needs to be in FR_REGS, using GR_AND_FR_REGS with the
286 union-based definition would lose the extra restrictions
287 placed on FR_REGS. GR_AND_FR_REGS is therefore only useful
288 for cases where GR_REGS and FR_REGS are both valid. */
289 int pos;
290 for (pos = 0; pos < narrow.num; ++pos)
291 {
292 enum reg_class cl2 = narrow.classes[pos];
293 if (hard_reg_set_subset_p (valid_for_cl, reg_class_contents[cl2]))
294 break;
295 }
296 map[i] = pos;
297 if (pos == narrow.num)
298 {
299 /* If several classes are equivalent, prefer to use the one
300 that was chosen as the allocno class. */
301 enum reg_class cl2 = ira_allocno_class_translate[cl];
302 if (ira_class_hard_regs_num[cl] == ira_class_hard_regs_num[cl2])
303 cl = cl2;
304 narrow.classes[narrow.num++] = cl;
305 }
306 }
307 if (narrow.num == full->num)
308 return full;
309
310 cost_classes **slot = cost_classes_htab->find_slot (&narrow, INSERT);
311 if (*slot == NULL)
312 {
313 cost_classes_t classes = setup_cost_classes (&narrow);
314 /* Map equivalent classes to the representative that we chose above. */
315 for (int i = 0; i < ira_important_classes_num; i++)
316 {
317 enum reg_class cl = ira_important_classes[i];
318 int index = full->index[cl];
319 if (index >= 0)
320 classes->index[cl] = map[index];
321 }
322 *slot = classes;
323 }
324 return *slot;
325 }
326
327 /* Setup cost classes for pseudo REGNO whose allocno class is ACLASS.
328 This function is used when we know an initial approximation of
329 allocno class of the pseudo already, e.g. on the second iteration
330 of class cost calculation or after class cost calculation in
331 register-pressure sensitive insn scheduling or register-pressure
332 sensitive loop-invariant motion. */
333 static void
334 setup_regno_cost_classes_by_aclass (int regno, enum reg_class aclass)
335 {
336 static struct cost_classes classes;
337 cost_classes_t classes_ptr;
338 enum reg_class cl;
339 int i;
340 cost_classes **slot;
341 HARD_REG_SET temp, temp2;
342 bool exclude_p;
343
344 if ((classes_ptr = cost_classes_aclass_cache[aclass]) == NULL)
345 {
346 COPY_HARD_REG_SET (temp, reg_class_contents[aclass]);
347 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
348 /* We exclude classes from consideration which are subsets of
349 ACLASS only if ACLASS is a uniform class. */
350 exclude_p = ira_uniform_class_p[aclass];
351 classes.num = 0;
352 for (i = 0; i < ira_important_classes_num; i++)
353 {
354 cl = ira_important_classes[i];
355 if (exclude_p)
356 {
357 /* Exclude non-uniform classes which are subsets of
358 ACLASS. */
359 COPY_HARD_REG_SET (temp2, reg_class_contents[cl]);
360 AND_COMPL_HARD_REG_SET (temp2, ira_no_alloc_regs);
361 if (hard_reg_set_subset_p (temp2, temp) && cl != aclass)
362 continue;
363 }
364 classes.classes[classes.num++] = cl;
365 }
366 slot = cost_classes_htab->find_slot (&classes, INSERT);
367 if (*slot == NULL)
368 {
369 classes_ptr = setup_cost_classes (&classes);
370 *slot = classes_ptr;
371 }
372 classes_ptr = cost_classes_aclass_cache[aclass] = (cost_classes_t) *slot;
373 }
374 if (regno_reg_rtx[regno] != NULL_RTX)
375 {
376 /* Restrict the classes to those that are valid for REGNO's mode
377 (which might for example exclude singleton classes if the mode
378 requires two registers). Also restrict the classes to those that
379 are valid for subregs of REGNO. */
380 const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno);
381 if (!valid_regs)
382 valid_regs = &reg_class_contents[ALL_REGS];
383 classes_ptr = restrict_cost_classes (classes_ptr,
384 PSEUDO_REGNO_MODE (regno),
385 *valid_regs);
386 }
387 regno_cost_classes[regno] = classes_ptr;
388 }
389
390 /* Set up cost classes for pseudo REGNO with MODE. Using MODE can
391 decrease the number of cost classes for the pseudo: if hard
392 registers of some important classes cannot hold a value of MODE,
393 the pseudo cannot get a hard register of those classes, and
394 calculating costs for such classes would only waste CPU
395 time. */
396 static void
397 setup_regno_cost_classes_by_mode (int regno, machine_mode mode)
398 {
399 if (const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno))
400 regno_cost_classes[regno] = restrict_cost_classes (&all_cost_classes,
401 mode, *valid_regs);
402 else
403 {
404 if (cost_classes_mode_cache[mode] == NULL)
405 cost_classes_mode_cache[mode]
406 = restrict_cost_classes (&all_cost_classes, mode,
407 reg_class_contents[ALL_REGS]);
408 regno_cost_classes[regno] = cost_classes_mode_cache[mode];
409 }
410 }
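/* Usage note (editorial summary): when valid_mode_changes_for_regno
   imposes no restriction, every pseudo of a given mode shares one
   restricted cost-class vector through cost_classes_mode_cache, so
   restrict_cost_classes runs at most once per machine mode on that
   path.  */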
411
412 /* Finalize info about the cost classes for each pseudo. */
413 static void
414 finish_regno_cost_classes (void)
415 {
416 ira_free (regno_cost_classes);
417 delete cost_classes_htab;
418 cost_classes_htab = NULL;
419 }
420
421 \f
422
423 /* Compute the cost of loading X into (if TO_P is TRUE) or from (if
424 TO_P is FALSE) a register of class RCLASS in mode MODE. X must not
425 be a pseudo register. */
426 static int
427 copy_cost (rtx x, machine_mode mode, reg_class_t rclass, bool to_p,
428 secondary_reload_info *prev_sri)
429 {
430 secondary_reload_info sri;
431 reg_class_t secondary_class = NO_REGS;
432
433 /* If X is a SCRATCH, there is actually nothing to move since we are
434 assuming optimal allocation. */
435 if (GET_CODE (x) == SCRATCH)
436 return 0;
437
438 /* Get the class we will actually use for a reload. */
439 rclass = targetm.preferred_reload_class (x, rclass);
440
441 /* If we need a secondary reload for an intermediate, the cost is
442 that of loading the input into the intermediate register, then
443 copying it. */
444 sri.prev_sri = prev_sri;
445 sri.extra_cost = 0;
446 /* PR 68770: Secondary reload might examine the t_icode field. */
447 sri.t_icode = CODE_FOR_nothing;
448
449 secondary_class = targetm.secondary_reload (to_p, x, rclass, mode, &sri);
450
451 if (secondary_class != NO_REGS)
452 {
453 ira_init_register_move_cost_if_necessary (mode);
454 return (ira_register_move_cost[mode][(int) secondary_class][(int) rclass]
455 + sri.extra_cost
456 + copy_cost (x, mode, secondary_class, to_p, &sri));
457 }
458
459 /* For memory, use the memory move cost, for (hard) registers, use
460 the cost to move between the register classes, and use the cost
461 of one insn for everything else (constants). */
462 if (MEM_P (x) || rclass == NO_REGS)
463 return sri.extra_cost
464 + ira_memory_move_cost[mode][(int) rclass][to_p != 0];
465 else if (REG_P (x))
466 {
467 reg_class_t x_class = REGNO_REG_CLASS (REGNO (x));
468
469 ira_init_register_move_cost_if_necessary (mode);
470 return (sri.extra_cost
471 + ira_register_move_cost[mode][(int) x_class][(int) rclass]);
472 }
473 else
474 /* If this is a constant, we may eventually want to call rtx_cost
475 here. */
476 return sri.extra_cost + COSTS_N_INSNS (1);
477 }
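/* Editorial summary of the recurrence implemented above: when the
   target requests a secondary reload class S,

     copy_cost (x, mode, rclass)
       = ira_register_move_cost[mode][S][rclass] + sri.extra_cost
         + copy_cost (x, mode, S, ...);

   otherwise the base case is a memory move cost (for MEM or NO_REGS),
   a register-class move cost (for a hard REG), or COSTS_N_INSNS (1)
   for constants, each plus sri.extra_cost.  */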
478
479 \f
480
481 /* Record the cost of using memory or hard registers of various
482 classes for the operands in INSN.
483
484 N_ALTS is the number of alternatives.
485 N_OPS is the number of operands.
486 OPS is an array of the operands.
487 MODES are the modes of the operands, in case any are VOIDmode.
488 CONSTRAINTS are the constraints to use for the operands. This array
489 is modified by this procedure.
490
491 This procedure works alternative by alternative. For each
492 alternative we assume that we will be able to allocate all allocnos
493 to their ideal register class and calculate the cost of using that
494 alternative. Then we compute, for each operand that is a
495 pseudo-register, the cost of having the allocno allocated to each
496 register class and using it in that alternative. To this cost is
497 added the cost of the alternative.
498
499 The cost of each class for this insn is its lowest cost among all
500 the alternatives. */
501 static void
502 record_reg_classes (int n_alts, int n_ops, rtx *ops,
503 machine_mode *modes, const char **constraints,
504 rtx_insn *insn, enum reg_class *pref)
505 {
506 int alt;
507 int i, j, k;
508 int insn_allows_mem[MAX_RECOG_OPERANDS];
509 move_table *move_in_cost, *move_out_cost;
510 short (*mem_cost)[2];
511
512 for (i = 0; i < n_ops; i++)
513 insn_allows_mem[i] = 0;
514
515 /* Process each alternative, each time minimizing an operand's cost
516 with the cost for each operand in that alternative. */
517 alternative_mask preferred = get_preferred_alternatives (insn);
518 for (alt = 0; alt < n_alts; alt++)
519 {
520 enum reg_class classes[MAX_RECOG_OPERANDS];
521 int allows_mem[MAX_RECOG_OPERANDS];
522 enum reg_class rclass;
523 int alt_fail = 0;
524 int alt_cost = 0, op_cost_add;
525
526 if (!TEST_BIT (preferred, alt))
527 {
528 for (i = 0; i < recog_data.n_operands; i++)
529 constraints[i] = skip_alternative (constraints[i]);
530
531 continue;
532 }
533
534 for (i = 0; i < n_ops; i++)
535 {
536 unsigned char c;
537 const char *p = constraints[i];
538 rtx op = ops[i];
539 machine_mode mode = modes[i];
540 int allows_addr = 0;
541 int win = 0;
542
543 /* Initially show we know nothing about the register class. */
544 classes[i] = NO_REGS;
545 allows_mem[i] = 0;
546
547 /* If this operand has no constraints at all, we can
548 conclude nothing about it since anything is valid. */
549 if (*p == 0)
550 {
551 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
552 memset (this_op_costs[i], 0, struct_costs_size);
553 continue;
554 }
555
556 /* If this alternative is only relevant when this operand
557 matches a previous operand, we do different things
558 depending on whether this operand is an allocno-reg or not.
559 We must process any modifiers for the operand before we
560 can make this test. */
561 while (*p == '%' || *p == '=' || *p == '+' || *p == '&')
562 p++;
563
564 if (p[0] >= '0' && p[0] <= '0' + i)
565 {
566 /* Copy class and whether memory is allowed from the
567 matching alternative. Then perform any needed cost
568 computations and/or adjustments. */
569 j = p[0] - '0';
570 classes[i] = classes[j];
571 allows_mem[i] = allows_mem[j];
572 if (allows_mem[i])
573 insn_allows_mem[i] = 1;
574
575 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
576 {
577 /* If this matches the other operand, we have no
578 added cost and we win. */
579 if (rtx_equal_p (ops[j], op))
580 win = 1;
581 /* If we can put the other operand into a register,
582 add to the cost of this alternative the cost to
583 copy this operand to the register used for the
584 other operand. */
585 else if (classes[j] != NO_REGS)
586 {
587 alt_cost += copy_cost (op, mode, classes[j], 1, NULL);
588 win = 1;
589 }
590 }
591 else if (! REG_P (ops[j])
592 || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER)
593 {
594 /* This op is an allocno but the one it matches is
595 not. */
596
597 /* If we can't put the other operand into a
598 register, this alternative can't be used. */
599
600 if (classes[j] == NO_REGS)
601 alt_fail = 1;
602 /* Otherwise, add to the cost of this alternative
603 the cost to copy the other operand to the hard
604 register used for this operand. */
605 else
606 alt_cost += copy_cost (ops[j], mode, classes[j], 1, NULL);
607 }
608 else
609 {
610 /* The costs of this operand are not the same as the
611 other operand since move costs are not symmetric.
612 Moreover, if we cannot tie them, this alternative
613 needs to do a copy, which is one insn. */
614 struct costs *pp = this_op_costs[i];
615 int *pp_costs = pp->cost;
616 cost_classes_t cost_classes_ptr
617 = regno_cost_classes[REGNO (op)];
618 enum reg_class *cost_classes = cost_classes_ptr->classes;
619 bool in_p = recog_data.operand_type[i] != OP_OUT;
620 bool out_p = recog_data.operand_type[i] != OP_IN;
621 enum reg_class op_class = classes[i];
622
623 ira_init_register_move_cost_if_necessary (mode);
624 if (! in_p)
625 {
626 ira_assert (out_p);
627 if (op_class == NO_REGS)
628 {
629 mem_cost = ira_memory_move_cost[mode];
630 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
631 {
632 rclass = cost_classes[k];
633 pp_costs[k] = mem_cost[rclass][0] * frequency;
634 }
635 }
636 else
637 {
638 move_out_cost = ira_may_move_out_cost[mode];
639 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
640 {
641 rclass = cost_classes[k];
642 pp_costs[k]
643 = move_out_cost[op_class][rclass] * frequency;
644 }
645 }
646 }
647 else if (! out_p)
648 {
649 ira_assert (in_p);
650 if (op_class == NO_REGS)
651 {
652 mem_cost = ira_memory_move_cost[mode];
653 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
654 {
655 rclass = cost_classes[k];
656 pp_costs[k] = mem_cost[rclass][1] * frequency;
657 }
658 }
659 else
660 {
661 move_in_cost = ira_may_move_in_cost[mode];
662 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
663 {
664 rclass = cost_classes[k];
665 pp_costs[k]
666 = move_in_cost[rclass][op_class] * frequency;
667 }
668 }
669 }
670 else
671 {
672 if (op_class == NO_REGS)
673 {
674 mem_cost = ira_memory_move_cost[mode];
675 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
676 {
677 rclass = cost_classes[k];
678 pp_costs[k] = ((mem_cost[rclass][0]
679 + mem_cost[rclass][1])
680 * frequency);
681 }
682 }
683 else
684 {
685 move_in_cost = ira_may_move_in_cost[mode];
686 move_out_cost = ira_may_move_out_cost[mode];
687 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
688 {
689 rclass = cost_classes[k];
690 pp_costs[k] = ((move_in_cost[rclass][op_class]
691 + move_out_cost[op_class][rclass])
692 * frequency);
693 }
694 }
695 }
696
697 /* If the alternative actually allows memory, make
698 things a bit cheaper since we won't need an extra
699 insn to load it. */
700 pp->mem_cost
701 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
702 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
703 - allows_mem[i]) * frequency;
704
705 /* If we have assigned a class to this allocno in
706 our first pass, add a cost to this alternative
707 corresponding to what we would add if this
708 allocno were not in the appropriate class. */
709 if (pref)
710 {
711 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
712
713 if (pref_class == NO_REGS)
714 alt_cost
715 += ((out_p
716 ? ira_memory_move_cost[mode][op_class][0] : 0)
717 + (in_p
718 ? ira_memory_move_cost[mode][op_class][1]
719 : 0));
720 else if (ira_reg_class_intersect
721 [pref_class][op_class] == NO_REGS)
722 alt_cost
723 += ira_register_move_cost[mode][pref_class][op_class];
724 }
725 if (REGNO (ops[i]) != REGNO (ops[j])
726 && ! find_reg_note (insn, REG_DEAD, op))
727 alt_cost += 2;
728
729 p++;
730 }
731 }
732
733 /* Scan all the constraint letters. See if the operand
734 matches any of the constraints. Collect the valid
735 register classes and see if this operand accepts
736 memory. */
737 while ((c = *p))
738 {
739 switch (c)
740 {
741 case '*':
742 /* Ignore the next letter for this pass. */
743 c = *++p;
744 break;
745
746 case '^':
747 alt_cost += 2;
748 break;
749
750 case '?':
751 alt_cost += 2;
752 break;
753
754 case 'g':
755 if (MEM_P (op)
756 || (CONSTANT_P (op)
757 && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))))
758 win = 1;
759 insn_allows_mem[i] = allows_mem[i] = 1;
760 classes[i] = ira_reg_class_subunion[classes[i]][GENERAL_REGS];
761 break;
762
763 default:
764 enum constraint_num cn = lookup_constraint (p);
765 enum reg_class cl;
766 switch (get_constraint_type (cn))
767 {
768 case CT_REGISTER:
769 cl = reg_class_for_constraint (cn);
770 if (cl != NO_REGS)
771 classes[i] = ira_reg_class_subunion[classes[i]][cl];
772 break;
773
774 case CT_CONST_INT:
775 if (CONST_INT_P (op)
776 && insn_const_int_ok_for_constraint (INTVAL (op), cn))
777 win = 1;
778 break;
779
780 case CT_MEMORY:
781 /* Every MEM can be reloaded to fit. */
782 insn_allows_mem[i] = allows_mem[i] = 1;
783 if (MEM_P (op))
784 win = 1;
785 break;
786
787 case CT_SPECIAL_MEMORY:
788 insn_allows_mem[i] = allows_mem[i] = 1;
789 if (MEM_P (op) && constraint_satisfied_p (op, cn))
790 win = 1;
791 break;
792
793 case CT_ADDRESS:
794 /* Every address can be reloaded to fit. */
795 allows_addr = 1;
796 if (address_operand (op, GET_MODE (op))
797 || constraint_satisfied_p (op, cn))
798 win = 1;
799 /* We know this operand is an address, so we
800 want it to be allocated to a hard register
801 that can be the base of an address,
802 i.e. BASE_REG_CLASS. */
803 classes[i]
804 = ira_reg_class_subunion[classes[i]]
805 [base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
806 ADDRESS, SCRATCH)];
807 break;
808
809 case CT_FIXED_FORM:
810 if (constraint_satisfied_p (op, cn))
811 win = 1;
812 break;
813 }
814 break;
815 }
816 p += CONSTRAINT_LEN (c, p);
817 if (c == ',')
818 break;
819 }
820
821 constraints[i] = p;
822
823 if (alt_fail)
824 break;
825
826 /* How we account for this operand now depends on whether it
827 is a pseudo register or not. If it is, we first check if
828 any register classes are valid. If not, we ignore this
829 alternative, since we want to assume that all allocnos get
830 allocated for register preferencing. If some register
831 class is valid, compute the costs of moving the allocno
832 into that class. */
833 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
834 {
835 if (classes[i] == NO_REGS && ! allows_mem[i])
836 {
837 /* We must always fail if the operand is a REG, but
838 we did not find a suitable class and memory is
839 not allowed.
840
841 Otherwise we may perform an uninitialized read
842 from this_op_costs after the `continue' statement
843 below. */
844 alt_fail = 1;
845 }
846 else
847 {
848 unsigned int regno = REGNO (op);
849 struct costs *pp = this_op_costs[i];
850 int *pp_costs = pp->cost;
851 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
852 enum reg_class *cost_classes = cost_classes_ptr->classes;
853 bool in_p = recog_data.operand_type[i] != OP_OUT;
854 bool out_p = recog_data.operand_type[i] != OP_IN;
855 enum reg_class op_class = classes[i];
856
857 ira_init_register_move_cost_if_necessary (mode);
858 if (! in_p)
859 {
860 ira_assert (out_p);
861 if (op_class == NO_REGS)
862 {
863 mem_cost = ira_memory_move_cost[mode];
864 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
865 {
866 rclass = cost_classes[k];
867 pp_costs[k] = mem_cost[rclass][0] * frequency;
868 }
869 }
870 else
871 {
872 move_out_cost = ira_may_move_out_cost[mode];
873 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
874 {
875 rclass = cost_classes[k];
876 pp_costs[k]
877 = move_out_cost[op_class][rclass] * frequency;
878 }
879 }
880 }
881 else if (! out_p)
882 {
883 ira_assert (in_p);
884 if (op_class == NO_REGS)
885 {
886 mem_cost = ira_memory_move_cost[mode];
887 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
888 {
889 rclass = cost_classes[k];
890 pp_costs[k] = mem_cost[rclass][1] * frequency;
891 }
892 }
893 else
894 {
895 move_in_cost = ira_may_move_in_cost[mode];
896 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
897 {
898 rclass = cost_classes[k];
899 pp_costs[k]
900 = move_in_cost[rclass][op_class] * frequency;
901 }
902 }
903 }
904 else
905 {
906 if (op_class == NO_REGS)
907 {
908 mem_cost = ira_memory_move_cost[mode];
909 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
910 {
911 rclass = cost_classes[k];
912 pp_costs[k] = ((mem_cost[rclass][0]
913 + mem_cost[rclass][1])
914 * frequency);
915 }
916 }
917 else
918 {
919 move_in_cost = ira_may_move_in_cost[mode];
920 move_out_cost = ira_may_move_out_cost[mode];
921 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
922 {
923 rclass = cost_classes[k];
924 pp_costs[k] = ((move_in_cost[rclass][op_class]
925 + move_out_cost[op_class][rclass])
926 * frequency);
927 }
928 }
929 }
930
931 if (op_class == NO_REGS)
932 /* Although we don't need an insn to reload from
933 memory, accessing memory is still usually more
934 expensive than using a register. */
935 pp->mem_cost = frequency;
936 else
937 /* If the alternative actually allows memory, make
938 things a bit cheaper since we won't need an
939 extra insn to load it. */
940 pp->mem_cost
941 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
942 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
943 - allows_mem[i]) * frequency;
944 /* If we have assigned a class to this allocno in
945 our first pass, add a cost to this alternative
946 corresponding to what we would add if this
947 allocno were not in the appropriate class. */
948 if (pref)
949 {
950 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
951
952 if (pref_class == NO_REGS)
953 {
954 if (op_class != NO_REGS)
955 alt_cost
956 += ((out_p
957 ? ira_memory_move_cost[mode][op_class][0]
958 : 0)
959 + (in_p
960 ? ira_memory_move_cost[mode][op_class][1]
961 : 0));
962 }
963 else if (op_class == NO_REGS)
964 alt_cost
965 += ((out_p
966 ? ira_memory_move_cost[mode][pref_class][1]
967 : 0)
968 + (in_p
969 ? ira_memory_move_cost[mode][pref_class][0]
970 : 0));
971 else if (ira_reg_class_intersect[pref_class][op_class]
972 == NO_REGS)
973 alt_cost += (ira_register_move_cost
974 [mode][pref_class][op_class]);
975 }
976 }
977 }
978
979 /* Otherwise, if this alternative wins, either because we
980 have already determined that or if we have a hard
981 register of the proper class, there is no cost for this
982 alternative. */
983 else if (win || (REG_P (op)
984 && reg_fits_class_p (op, classes[i],
985 0, GET_MODE (op))))
986 ;
987
988 /* If registers are valid, the cost of this alternative
989 includes copying the object to and/or from a
990 register. */
991 else if (classes[i] != NO_REGS)
992 {
993 if (recog_data.operand_type[i] != OP_OUT)
994 alt_cost += copy_cost (op, mode, classes[i], 1, NULL);
995
996 if (recog_data.operand_type[i] != OP_IN)
997 alt_cost += copy_cost (op, mode, classes[i], 0, NULL);
998 }
999 /* The only other way this alternative can be used is if
1000 this is a constant that could be placed into memory. */
1001 else if (CONSTANT_P (op) && (allows_addr || allows_mem[i]))
1002 alt_cost += ira_memory_move_cost[mode][classes[i]][1];
1003 else
1004 alt_fail = 1;
1005
1006 if (alt_fail)
1007 break;
1008 }
1009
1010 if (alt_fail)
1011 {
1012 /* The loop above might have exited early once the failure
1013 was seen. Skip over the constraints for the remaining
1014 operands. */
1015 i += 1;
1016 for (; i < n_ops; ++i)
1017 constraints[i] = skip_alternative (constraints[i]);
1018 continue;
1019 }
1020
1021 op_cost_add = alt_cost * frequency;
1022 /* Finally, update the costs with the information we've
1023 calculated about this alternative. */
1024 for (i = 0; i < n_ops; i++)
1025 if (REG_P (ops[i]) && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
1026 {
1027 struct costs *pp = op_costs[i], *qq = this_op_costs[i];
1028 int *pp_costs = pp->cost, *qq_costs = qq->cost;
1029 int scale = 1 + (recog_data.operand_type[i] == OP_INOUT);
1030 cost_classes_t cost_classes_ptr
1031 = regno_cost_classes[REGNO (ops[i])];
1032
1033 pp->mem_cost = MIN (pp->mem_cost,
1034 (qq->mem_cost + op_cost_add) * scale);
1035
1036 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1037 pp_costs[k]
1038 = MIN (pp_costs[k], (qq_costs[k] + op_cost_add) * scale);
1039 }
1040 }
1041
1042 if (allocno_p)
1043 for (i = 0; i < n_ops; i++)
1044 {
1045 ira_allocno_t a;
1046 rtx op = ops[i];
1047
1048 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
1049 continue;
1050 a = ira_curr_regno_allocno_map [REGNO (op)];
1051 if (! ALLOCNO_BAD_SPILL_P (a) && insn_allows_mem[i] == 0)
1052 ALLOCNO_BAD_SPILL_P (a) = true;
1053 }
1054
1055 }
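/* Editorial sketch of the final per-alternative update above: with
   op_cost_add = alt_cost * frequency and scale = 1 for plain operands
   or 2 for OP_INOUT operands,

     pp->cost[k]  = MIN (pp->cost[k],  (qq->cost[k]  + op_cost_add) * scale);
     pp->mem_cost = MIN (pp->mem_cost, (qq->mem_cost + op_cost_add) * scale);

   so each class's recorded cost is that of its cheapest alternative.  */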
1056
1057 \f
1058
1059 /* Wrapper around REGNO_OK_FOR_INDEX_P, to allow pseudo registers. */
1060 static inline bool
1061 ok_for_index_p_nonstrict (rtx reg)
1062 {
1063 unsigned regno = REGNO (reg);
1064
1065 return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno);
1066 }
1067
1068 /* A version of regno_ok_for_base_p for use here, when all
1069 pseudo-registers should count as OK. Arguments as for
1070 regno_ok_for_base_p. */
1071 static inline bool
1072 ok_for_base_p_nonstrict (rtx reg, machine_mode mode, addr_space_t as,
1073 enum rtx_code outer_code, enum rtx_code index_code)
1074 {
1075 unsigned regno = REGNO (reg);
1076
1077 if (regno >= FIRST_PSEUDO_REGISTER)
1078 return true;
1079 return ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
1080 }
1081
1082 /* Record the pseudo registers we must reload into hard registers in a
1083 subexpression of a memory address, X.
1084
1085 If CONTEXT is 0, we are looking at the base part of an address,
1086 otherwise we are looking at the index part.
1087
1088 MODE and AS are the mode and address space of the memory reference;
1089 OUTER_CODE and INDEX_CODE give the context that the rtx appears in.
1090 These four arguments are passed down to base_reg_class.
1091
1092 SCALE is twice the amount to multiply the cost by (it is twice so
1093 we can represent half-cost adjustments). */
1094 static void
1095 record_address_regs (machine_mode mode, addr_space_t as, rtx x,
1096 int context, enum rtx_code outer_code,
1097 enum rtx_code index_code, int scale)
1098 {
1099 enum rtx_code code = GET_CODE (x);
1100 enum reg_class rclass;
1101
1102 if (context == 1)
1103 rclass = INDEX_REG_CLASS;
1104 else
1105 rclass = base_reg_class (mode, as, outer_code, index_code);
1106
1107 switch (code)
1108 {
1109 case CONST_INT:
1110 case CONST:
1111 case CC0:
1112 case PC:
1113 case SYMBOL_REF:
1114 case LABEL_REF:
1115 return;
1116
1117 case PLUS:
1118 /* When we have an address that is a sum, we must determine
1119 whether registers are "base" or "index" regs. If there is a
1120 sum of two registers, we must choose one to be the "base".
1121 Luckily, we can use the REG_POINTER to make a good choice
1122 most of the time. We only need to do this on machines that
1123 can have two registers in an address and where the base and
1124 index register classes are different.
1125
1126 ??? This code used to set REGNO_POINTER_FLAG in some cases,
1127 but that seems bogus since it should only be set when we are
1128 sure the register is being used as a pointer. */
1129 {
1130 rtx arg0 = XEXP (x, 0);
1131 rtx arg1 = XEXP (x, 1);
1132 enum rtx_code code0 = GET_CODE (arg0);
1133 enum rtx_code code1 = GET_CODE (arg1);
1134
1135 /* Look inside subregs. */
1136 if (code0 == SUBREG)
1137 arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0);
1138 if (code1 == SUBREG)
1139 arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1);
1140
1141 /* If index registers do not appear, or coincide with base registers,
1142 just record registers in any non-constant operands. We
1143 assume here, as well as in the tests below, that all
1144 addresses are in canonical form. */
1145 if (MAX_REGS_PER_ADDRESS == 1
1146 || INDEX_REG_CLASS == base_reg_class (VOIDmode, as, PLUS, SCRATCH))
1147 {
1148 record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1149 if (! CONSTANT_P (arg1))
1150 record_address_regs (mode, as, arg1, context, PLUS, code0, scale);
1151 }
1152
1153 /* If the second operand is a constant integer, it doesn't
1154 change what class the first operand must be. */
1155 else if (CONST_SCALAR_INT_P (arg1))
1156 record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1157 /* If the second operand is a symbolic constant, the first
1158 operand must be an index register. */
1159 else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF)
1160 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1161 /* If both operands are registers but one is already a hard
1162 register of index or reg-base class, give the other the
1163 class that the hard register is not. */
1164 else if (code0 == REG && code1 == REG
1165 && REGNO (arg0) < FIRST_PSEUDO_REGISTER
1166 && (ok_for_base_p_nonstrict (arg0, mode, as, PLUS, REG)
1167 || ok_for_index_p_nonstrict (arg0)))
1168 record_address_regs (mode, as, arg1,
1169 ok_for_base_p_nonstrict (arg0, mode, as,
1170 PLUS, REG) ? 1 : 0,
1171 PLUS, REG, scale);
1172 else if (code0 == REG && code1 == REG
1173 && REGNO (arg1) < FIRST_PSEUDO_REGISTER
1174 && (ok_for_base_p_nonstrict (arg1, mode, as, PLUS, REG)
1175 || ok_for_index_p_nonstrict (arg1)))
1176 record_address_regs (mode, as, arg0,
1177 ok_for_base_p_nonstrict (arg1, mode, as,
1178 PLUS, REG) ? 1 : 0,
1179 PLUS, REG, scale);
1180 /* If one operand is known to be a pointer, it must be the
1181 base with the other operand the index. Likewise if the
1182 other operand is a MULT. */
1183 else if ((code0 == REG && REG_POINTER (arg0)) || code1 == MULT)
1184 {
1185 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
1186 record_address_regs (mode, as, arg1, 1, PLUS, code0, scale);
1187 }
1188 else if ((code1 == REG && REG_POINTER (arg1)) || code0 == MULT)
1189 {
1190 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1191 record_address_regs (mode, as, arg1, 0, PLUS, code0, scale);
1192 }
1193 /* Otherwise, count equal chances that each might be a base or
1194 index register. This case should be rare. */
1195 else
1196 {
1197 record_address_regs (mode, as, arg0, 0, PLUS, code1, scale / 2);
1198 record_address_regs (mode, as, arg0, 1, PLUS, code1, scale / 2);
1199 record_address_regs (mode, as, arg1, 0, PLUS, code0, scale / 2);
1200 record_address_regs (mode, as, arg1, 1, PLUS, code0, scale / 2);
1201 }
1202 }
1203 break;
1204
1205 /* Double the importance of an allocno that is incremented or
1206 decremented, since it would take two extra insns if it ends
1207 up in the wrong place. */
1208 case POST_MODIFY:
1209 case PRE_MODIFY:
1210 record_address_regs (mode, as, XEXP (x, 0), 0, code,
1211 GET_CODE (XEXP (XEXP (x, 1), 1)), 2 * scale);
1212 if (REG_P (XEXP (XEXP (x, 1), 1)))
1213 record_address_regs (mode, as, XEXP (XEXP (x, 1), 1), 1, code, REG,
1214 2 * scale);
1215 break;
1216
1217 case POST_INC:
1218 case PRE_INC:
1219 case POST_DEC:
1220 case PRE_DEC:
1221 /* Double the importance of an allocno that is incremented or
1222 decremented, since it would take two extra insns if it ends
1223 up in the wrong place. */
1224 record_address_regs (mode, as, XEXP (x, 0), 0, code, SCRATCH, 2 * scale);
1225 break;
1226
1227 case REG:
1228 {
1229 struct costs *pp;
1230 int *pp_costs;
1231 enum reg_class i;
1232 int k, regno, add_cost;
1233 cost_classes_t cost_classes_ptr;
1234 enum reg_class *cost_classes;
1235 move_table *move_in_cost;
1236
1237 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
1238 break;
1239
1240 regno = REGNO (x);
1241 if (allocno_p)
1242 ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[regno]) = true;
1243 pp = COSTS (costs, COST_INDEX (regno));
1244 add_cost = (ira_memory_move_cost[Pmode][rclass][1] * scale) / 2;
1245 if (INT_MAX - add_cost < pp->mem_cost)
1246 pp->mem_cost = INT_MAX;
1247 else
1248 pp->mem_cost += add_cost;
1249 cost_classes_ptr = regno_cost_classes[regno];
1250 cost_classes = cost_classes_ptr->classes;
1251 pp_costs = pp->cost;
1252 ira_init_register_move_cost_if_necessary (Pmode);
1253 move_in_cost = ira_may_move_in_cost[Pmode];
1254 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1255 {
1256 i = cost_classes[k];
1257 add_cost = (move_in_cost[i][rclass] * scale) / 2;
1258 if (INT_MAX - add_cost < pp_costs[k])
1259 pp_costs[k] = INT_MAX;
1260 else
1261 pp_costs[k] += add_cost;
1262 }
1263 }
1264 break;
1265
1266 default:
1267 {
1268 const char *fmt = GET_RTX_FORMAT (code);
1269 int i;
1270 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1271 if (fmt[i] == 'e')
1272 record_address_regs (mode, as, XEXP (x, i), context, code, SCRATCH,
1273 scale);
1274 }
1275 }
1276 }
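/* Example of the SCALE convention (illustrative): for an address
   (plus (reg A) (reg B)) where neither register's role can be deduced,
   the final 'else' arm above records each of A and B as both a base
   (context 0) and an index (context 1) candidate at scale / 2, while
   auto-increment and auto-modify addresses use 2 * scale because a
   misplaced allocno there costs two extra insns.  */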
1277
1278 \f
1279
1280 /* Calculate the costs of insn operands. */
1281 static void
1282 record_operand_costs (rtx_insn *insn, enum reg_class *pref)
1283 {
1284 const char *constraints[MAX_RECOG_OPERANDS];
1285 machine_mode modes[MAX_RECOG_OPERANDS];
1286 rtx set;
1287 int i;
1288
1289 if ((set = single_set (insn)) != NULL_RTX
1290 /* In rare cases the single set insn might have fewer than 2 operands
1291 because the source can be a fixed special reg. */
1292 && recog_data.n_operands > 1
1293 && recog_data.operand[0] == SET_DEST (set)
1294 && recog_data.operand[1] == SET_SRC (set))
1295 {
1296 int regno, other_regno;
1297 rtx dest = SET_DEST (set);
1298 rtx src = SET_SRC (set);
1299
1300 if (GET_CODE (dest) == SUBREG
1301 && known_eq (GET_MODE_SIZE (GET_MODE (dest)),
1302 GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))
1303 dest = SUBREG_REG (dest);
1304 if (GET_CODE (src) == SUBREG
1305 && known_eq (GET_MODE_SIZE (GET_MODE (src)),
1306 GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
1307 src = SUBREG_REG (src);
1308 if (REG_P (src) && REG_P (dest)
1309 && (((regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
1310 && (other_regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER)
1311 || ((regno = REGNO (dest)) >= FIRST_PSEUDO_REGISTER
1312 && (other_regno = REGNO (src)) < FIRST_PSEUDO_REGISTER)))
1313 {
1314 machine_mode mode = GET_MODE (SET_SRC (set));
1315 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1316 enum reg_class *cost_classes = cost_classes_ptr->classes;
1317 reg_class_t rclass, hard_reg_class, pref_class, bigger_hard_reg_class;
1318 int cost, k;
1319 move_table *move_costs;
1320 bool dead_p = find_regno_note (insn, REG_DEAD, REGNO (src));
1321
1322 ira_init_register_move_cost_if_necessary (mode);
1323 move_costs = ira_register_move_cost[mode];
1324 hard_reg_class = REGNO_REG_CLASS (other_regno);
1325 bigger_hard_reg_class = ira_pressure_class_translate[hard_reg_class];
1326 /* Target code may return any cost for a mode which does not
1327 fit the hard reg class (e.g. DImode for AREG on
1328 i386). Check this and use a bigger class to get the
1329 right cost. */
1330 if (bigger_hard_reg_class != NO_REGS
1331 && ! ira_hard_reg_in_set_p (other_regno, mode,
1332 reg_class_contents[hard_reg_class]))
1333 hard_reg_class = bigger_hard_reg_class;
1334 i = regno == (int) REGNO (src) ? 1 : 0;
1335 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1336 {
1337 rclass = cost_classes[k];
1338 cost = (i == 0
1339 ? move_costs[hard_reg_class][rclass]
1340 : move_costs[rclass][hard_reg_class]);
1341
1342 op_costs[i]->cost[k] = cost * frequency;
1343 /* If we have assigned a class to this allocno in our
1344 first pass, add a cost to this alternative
1345 corresponding to what we would add if this allocno
1346 were not in the appropriate class. */
1347 if (pref)
1348 {
1349 if ((pref_class = pref[COST_INDEX (regno)]) == NO_REGS)
1350 op_costs[i]->cost[k]
1351 += ((i == 0 ? ira_memory_move_cost[mode][rclass][0] : 0)
1352 + (i == 1 ? ira_memory_move_cost[mode][rclass][1] : 0)
1353 * frequency);
1354 else if (ira_reg_class_intersect[pref_class][rclass]
1355 == NO_REGS)
1356 op_costs[i]->cost[k]
1357 += (move_costs[pref_class][rclass]
1358 * frequency);
1359 }
1360 /* If this insn is a single set copying operand 1 to
1361 operand 0, and one operand is an allocno while the
1362 other is a hard reg or an allocno that prefers a hard
1363 register in its own register class, then we may want
1364 to adjust the cost of that register class to
1365 -1.
1366
1367 Avoid the adjustment if the source does not die, to
1368 avoid stressing the register allocator by preferencing
1369 two colliding registers into a single class. */
1370 if (dead_p
1371 && TEST_HARD_REG_BIT (reg_class_contents[rclass], other_regno)
1372 && (reg_class_size[(int) rclass]
1373 == (ira_reg_class_max_nregs
1374 [(int) rclass][(int) GET_MODE(src)])))
1375 {
1376 if (reg_class_size[rclass] == 1)
1377 op_costs[i]->cost[k] = -frequency;
1378 else if (in_hard_reg_set_p (reg_class_contents[rclass],
1379 GET_MODE(src), other_regno))
1380 op_costs[i]->cost[k] = -frequency;
1381 }
1382 }
1383 op_costs[i]->mem_cost
1384 = ira_memory_move_cost[mode][hard_reg_class][i] * frequency;
1385 if (pref && (pref_class = pref[COST_INDEX (regno)]) != NO_REGS)
1386 op_costs[i]->mem_cost
1387 += ira_memory_move_cost[mode][pref_class][i] * frequency;
1388 return;
1389 }
1390 }
1391
1392 for (i = 0; i < recog_data.n_operands; i++)
1393 {
1394 constraints[i] = recog_data.constraints[i];
1395 modes[i] = recog_data.operand_mode[i];
1396 }
1397
1398 /* If we get here, we are set up to record the costs of all the
1399 operands for this insn. Start by initializing the costs. Then
1400 handle any address registers. Finally record the desired classes
1401 for any allocnos, doing it twice if some pair of operands are
1402 commutative. */
1403 for (i = 0; i < recog_data.n_operands; i++)
1404 {
1405 memcpy (op_costs[i], init_cost, struct_costs_size);
1406
1407 if (GET_CODE (recog_data.operand[i]) == SUBREG)
1408 recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]);
1409
1410 if (MEM_P (recog_data.operand[i]))
1411 record_address_regs (GET_MODE (recog_data.operand[i]),
1412 MEM_ADDR_SPACE (recog_data.operand[i]),
1413 XEXP (recog_data.operand[i], 0),
1414 0, MEM, SCRATCH, frequency * 2);
1415 else if (constraints[i][0] == 'p'
1416 || (insn_extra_address_constraint
1417 (lookup_constraint (constraints[i]))))
1418 record_address_regs (VOIDmode, ADDR_SPACE_GENERIC,
1419 recog_data.operand[i], 0, ADDRESS, SCRATCH,
1420 frequency * 2);
1421 }
1422
1423 /* Check for commutative operands in a separate loop so everything
1424 will have been initialized. We must do this even if one operand
1425 is a constant--see addsi3 in m68k.md. */
1426 for (i = 0; i < (int) recog_data.n_operands - 1; i++)
1427 if (constraints[i][0] == '%')
1428 {
1429 const char *xconstraints[MAX_RECOG_OPERANDS];
1430 int j;
1431
1432 /* Handle commutative operands by swapping the
1433 constraints. We assume the modes are the same. */
1434 for (j = 0; j < recog_data.n_operands; j++)
1435 xconstraints[j] = constraints[j];
1436
1437 xconstraints[i] = constraints[i+1];
1438 xconstraints[i+1] = constraints[i];
1439 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1440 recog_data.operand, modes,
1441 xconstraints, insn, pref);
1442 }
1443 record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1444 recog_data.operand, modes,
1445 constraints, insn, pref);
1446 }
1447
1448 \f
1449
1450 /* Process one insn INSN. Scan it and record each time it would save
1451 code to put certain allocnos in a certain class. Return the last
1452 insn processed, so that the scan can be continued from there. */
1453 static rtx_insn *
1454 scan_one_insn (rtx_insn *insn)
1455 {
1456 enum rtx_code pat_code;
1457 rtx set, note;
1458 int i, k;
1459 bool counted_mem;
1460
1461 if (!NONDEBUG_INSN_P (insn))
1462 return insn;
1463
1464 pat_code = GET_CODE (PATTERN (insn));
1465 if (pat_code == ASM_INPUT)
1466 return insn;
1467
1468 /* If INSN is a USE/CLOBBER of a pseudo in a mode M then go ahead
1469 and initialize the register move costs of mode M.
1470
1471 The pseudo may be related to another pseudo via a copy (implicit or
1472 explicit) and if there are no mode M uses/sets of the original
1473 pseudo, then we may leave the register move costs uninitialized for
1474 mode M. */
1475 if (pat_code == USE || pat_code == CLOBBER)
1476 {
1477 rtx x = XEXP (PATTERN (insn), 0);
1478 if (GET_CODE (x) == REG
1479 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1480 && have_regs_of_mode[GET_MODE (x)])
1481 ira_init_register_move_cost_if_necessary (GET_MODE (x));
1482 return insn;
1483 }
1484
1485 if (pat_code == CLOBBER_HIGH)
1486 {
1487 gcc_assert (REG_P (XEXP (PATTERN (insn), 0))
1488 && HARD_REGISTER_P (XEXP (PATTERN (insn), 0)));
1489 return insn;
1490 }
1491
1492 counted_mem = false;
1493 set = single_set (insn);
1494 extract_insn (insn);
1495
1496 /* If this insn loads a parameter from its stack slot, then it
1497 represents a savings, rather than a cost, if the parameter is
1498 stored in memory. Record this fact.
1499
1500 Similarly if we're loading other constants from memory (constant
1501 pool, TOC references, small data areas, etc) and this is the only
1502 assignment to the destination pseudo.
1503
1504 Don't do this if SET_SRC (set) isn't a general operand: if it is
1505 a memory requiring special instructions to load it, decreasing
1506 mem_cost might result in it being loaded using the specialized
1507 instruction into a register, then stored into the stack and loaded
1508 again from the stack. See PR52208.
1509
1510 Don't do this if SET_SRC (set) has side effects. See PR56124. */
1511 if (set != 0 && REG_P (SET_DEST (set)) && MEM_P (SET_SRC (set))
1512 && (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL_RTX
1513 && ((MEM_P (XEXP (note, 0))
1514 && !side_effects_p (SET_SRC (set)))
1515 || (CONSTANT_P (XEXP (note, 0))
1516 && targetm.legitimate_constant_p (GET_MODE (SET_DEST (set)),
1517 XEXP (note, 0))
1518 && REG_N_SETS (REGNO (SET_DEST (set))) == 1))
1519 && general_operand (SET_SRC (set), GET_MODE (SET_SRC (set)))
1520 /* LRA does not use equiv with a symbol for PIC code. */
1521 && (! ira_use_lra_p || ! pic_offset_table_rtx
1522 || ! contains_symbol_ref_p (XEXP (note, 0))))
1523 {
1524 enum reg_class cl = GENERAL_REGS;
1525 rtx reg = SET_DEST (set);
1526 int num = COST_INDEX (REGNO (reg));
1527
1528 COSTS (costs, num)->mem_cost
1529 -= ira_memory_move_cost[GET_MODE (reg)][cl][1] * frequency;
1530 record_address_regs (GET_MODE (SET_SRC (set)),
1531 MEM_ADDR_SPACE (SET_SRC (set)),
1532 XEXP (SET_SRC (set), 0), 0, MEM, SCRATCH,
1533 frequency * 2);
1534 counted_mem = true;
1535 }
1536
1537 record_operand_costs (insn, pref);
1538
1539 /* Now add the cost for each operand to the total costs for its
1540 allocno. */
1541 for (i = 0; i < recog_data.n_operands; i++)
1542 {
1543 rtx op = recog_data.operand[i];
1544
1545 if (GET_CODE (op) == SUBREG)
1546 op = SUBREG_REG (op);
1547 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
1548 {
1549 int regno = REGNO (op);
1550 struct costs *p = COSTS (costs, COST_INDEX (regno));
1551 struct costs *q = op_costs[i];
1552 int *p_costs = p->cost, *q_costs = q->cost;
1553 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1554 int add_cost;
1555
1556 /* If we already accounted for the memory "cost" above, don't
1557 do so again. */
1558 if (!counted_mem)
1559 {
1560 add_cost = q->mem_cost;
1561 if (add_cost > 0 && INT_MAX - add_cost < p->mem_cost)
1562 p->mem_cost = INT_MAX;
1563 else
1564 p->mem_cost += add_cost;
1565 }
1566 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1567 {
1568 add_cost = q_costs[k];
1569 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1570 p_costs[k] = INT_MAX;
1571 else
1572 p_costs[k] += add_cost;
1573 }
1574 }
1575 }
1576 return insn;
1577 }
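/* Editorial note on the accumulation above: per-insn costs are added
   into the global 'costs' records with an overflow guard, e.g.

     if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
       p_costs[k] = INT_MAX;
     else
       p_costs[k] += add_cost;

   so heavily used pseudos saturate at INT_MAX instead of wrapping.  */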
1578
1579 \f
1580
1581 /* Print allocnos costs to file F. */
1582 static void
1583 print_allocno_costs (FILE *f)
1584 {
1585 int k;
1586 ira_allocno_t a;
1587 ira_allocno_iterator ai;
1588
1589 ira_assert (allocno_p);
1590 fprintf (f, "\n");
1591 FOR_EACH_ALLOCNO (a, ai)
1592 {
1593 int i, rclass;
1594 basic_block bb;
1595 int regno = ALLOCNO_REGNO (a);
1596 cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1597 enum reg_class *cost_classes = cost_classes_ptr->classes;
1598
1599 i = ALLOCNO_NUM (a);
1600 fprintf (f, " a%d(r%d,", i, regno);
1601 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1602 fprintf (f, "b%d", bb->index);
1603 else
1604 fprintf (f, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1605 fprintf (f, ") costs:");
1606 for (k = 0; k < cost_classes_ptr->num; k++)
1607 {
1608 rclass = cost_classes[k];
1609 fprintf (f, " %s:%d", reg_class_names[rclass],
1610 COSTS (costs, i)->cost[k]);
1611 if (flag_ira_region == IRA_REGION_ALL
1612 || flag_ira_region == IRA_REGION_MIXED)
1613 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->cost[k]);
1614 }
1615 fprintf (f, " MEM:%i", COSTS (costs, i)->mem_cost);
1616 if (flag_ira_region == IRA_REGION_ALL
1617 || flag_ira_region == IRA_REGION_MIXED)
1618 fprintf (f, ",%d", COSTS (total_allocno_costs, i)->mem_cost);
1619 fprintf (f, "\n");
1620 }
1621 }
1622
1623 /* Print pseudo costs to file F. */
1624 static void
1625 print_pseudo_costs (FILE *f)
1626 {
1627 int regno, k;
1628 int rclass;
1629 cost_classes_t cost_classes_ptr;
1630 enum reg_class *cost_classes;
1631
1632 ira_assert (! allocno_p);
1633 fprintf (f, "\n");
1634 for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
1635 {
1636 if (REG_N_REFS (regno) <= 0)
1637 continue;
1638 cost_classes_ptr = regno_cost_classes[regno];
1639 cost_classes = cost_classes_ptr->classes;
1640 fprintf (f, " r%d costs:", regno);
1641 for (k = 0; k < cost_classes_ptr->num; k++)
1642 {
1643 rclass = cost_classes[k];
1644 fprintf (f, " %s:%d", reg_class_names[rclass],
1645 COSTS (costs, regno)->cost[k]);
1646 }
1647 fprintf (f, " MEM:%i\n", COSTS (costs, regno)->mem_cost);
1648 }
1649 }
1650
1651 /* Traverse basic block BB to update the allocno
1652 costs. */
1653 static void
1654 process_bb_for_costs (basic_block bb)
1655 {
1656 rtx_insn *insn;
1657
1658 frequency = REG_FREQ_FROM_BB (bb);
1659 if (frequency == 0)
1660 frequency = 1;
1661 FOR_BB_INSNS (bb, insn)
1662 insn = scan_one_insn (insn);
1663 }
1664
1665 /* Traverse the BB represented by LOOP_TREE_NODE to update the allocno
1666 costs. */
1667 static void
1668 process_bb_node_for_costs (ira_loop_tree_node_t loop_tree_node)
1669 {
1670 basic_block bb;
1671
1672 bb = loop_tree_node->bb;
1673 if (bb != NULL)
1674 process_bb_for_costs (bb);
1675 }
1676
1677 /* Find costs of register classes and memory for allocnos or pseudos
1678 and their best costs. Set up preferred, alternative and allocno
1679 classes for pseudos. */
1680 static void
1681 find_costs_and_classes (FILE *dump_file)
1682 {
1683 int i, k, start, max_cost_classes_num;
1684 int pass;
1685 basic_block bb;
1686 enum reg_class *regno_best_class, new_class;
1687
1688 init_recog ();
1689 regno_best_class
1690 = (enum reg_class *) ira_allocate (max_reg_num ()
1691 * sizeof (enum reg_class));
1692 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1693 regno_best_class[i] = NO_REGS;
1694 if (!resize_reg_info () && allocno_p
1695 && pseudo_classes_defined_p && flag_expensive_optimizations)
1696 {
1697 ira_allocno_t a;
1698 ira_allocno_iterator ai;
1699
1700 pref = pref_buffer;
1701 max_cost_classes_num = 1;
1702 FOR_EACH_ALLOCNO (a, ai)
1703 {
1704 pref[ALLOCNO_NUM (a)] = reg_preferred_class (ALLOCNO_REGNO (a));
1705 setup_regno_cost_classes_by_aclass
1706 (ALLOCNO_REGNO (a), pref[ALLOCNO_NUM (a)]);
1707 max_cost_classes_num
1708 = MAX (max_cost_classes_num,
1709 regno_cost_classes[ALLOCNO_REGNO (a)]->num);
1710 }
1711 start = 1;
1712 }
1713 else
1714 {
1715 pref = NULL;
1716 max_cost_classes_num = ira_important_classes_num;
1717 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1718 if (regno_reg_rtx[i] != NULL_RTX)
1719 setup_regno_cost_classes_by_mode (i, PSEUDO_REGNO_MODE (i));
1720 else
1721 setup_regno_cost_classes_by_aclass (i, ALL_REGS);
1722 start = 0;
1723 }
1724 if (allocno_p)
1725 /* Clear the flag for the next compiled function. */
1726 pseudo_classes_defined_p = false;
1727 /* Normally we scan the insns once and determine the best class to
1728    use for each allocno.  However, if -fexpensive-optimizations is
1729 on, we do so twice, the second time using the tentative best
1730 classes to guide the selection. */
1731 for (pass = start; pass <= flag_expensive_optimizations; pass++)
1732 {
1733 if ((!allocno_p || internal_flag_ira_verbose > 0) && dump_file)
1734 fprintf (dump_file,
1735 "\nPass %i for finding pseudo/allocno costs\n\n", pass);
1736
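      /* On passes after the first, recompute the cost classes of each
         pseudo from the best class found on the previous pass.  */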
1737 if (pass != start)
1738 {
1739 max_cost_classes_num = 1;
1740 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1741 {
1742 setup_regno_cost_classes_by_aclass (i, regno_best_class[i]);
1743 max_cost_classes_num
1744 = MAX (max_cost_classes_num, regno_cost_classes[i]->num);
1745 }
1746 }
1747
1748 struct_costs_size
1749 = sizeof (struct costs) + sizeof (int) * (max_cost_classes_num - 1);
1750 /* Zero out our accumulation of the cost of each class for each
1751 allocno. */
1752 memset (costs, 0, cost_elements_num * struct_costs_size);
1753
1754 if (allocno_p)
1755 {
1756 /* Scan the instructions and record each time it would save code
1757 to put a certain allocno in a certain class. */
1758 ira_traverse_loop_tree (true, ira_loop_tree_root,
1759 process_bb_node_for_costs, NULL);
1760
1761 memcpy (total_allocno_costs, costs,
1762 max_struct_costs_size * ira_allocnos_num);
1763 }
1764 else
1765 {
1766 basic_block bb;
1767
1768 FOR_EACH_BB_FN (bb, cfun)
1769 process_bb_for_costs (bb);
1770 }
1771
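      /* From now on record the discovered class preferences in the
         preallocated buffer.  */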
1772 if (pass == 0)
1773 pref = pref_buffer;
1774
1775 /* Now for each allocno look at how desirable each class is and
1776 find which class is preferred. */
1777 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1778 {
1779 ira_allocno_t a, parent_a;
1780 int rclass, a_num, parent_a_num, add_cost;
1781 ira_loop_tree_node_t parent;
1782 int best_cost, allocno_cost;
1783 enum reg_class best, alt_class;
1784 cost_classes_t cost_classes_ptr = regno_cost_classes[i];
1785 enum reg_class *cost_classes;
1786 int *i_costs = temp_costs->cost;
1787 int i_mem_cost;
1788 int equiv_savings = regno_equiv_gains[i];
1789
1790 if (! allocno_p)
1791 {
1792 if (regno_reg_rtx[i] == NULL_RTX)
1793 continue;
1794 memcpy (temp_costs, COSTS (costs, i), struct_costs_size);
1795 i_mem_cost = temp_costs->mem_cost;
1796 cost_classes = cost_classes_ptr->classes;
1797 }
1798 else
1799 {
1800 if (ira_regno_allocno_map[i] == NULL)
1801 continue;
1802 memset (temp_costs, 0, struct_costs_size);
1803 i_mem_cost = 0;
1804 cost_classes = cost_classes_ptr->classes;
1805 /* Sum up the costs of all allocnos with the same regno.  */
1806 for (a = ira_regno_allocno_map[i];
1807 a != NULL;
1808 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1809 {
1810 int *a_costs, *p_costs;
1811
1812 a_num = ALLOCNO_NUM (a);
1813 if ((flag_ira_region == IRA_REGION_ALL
1814 || flag_ira_region == IRA_REGION_MIXED)
1815 && (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
1816 && (parent_a = parent->regno_allocno_map[i]) != NULL
1817 /* There are no caps yet. */
1818 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE
1819 (a)->border_allocnos,
1820 ALLOCNO_NUM (a)))
1821 {
1822 /* Propagate costs to upper levels in the region
1823 tree. */
1824 parent_a_num = ALLOCNO_NUM (parent_a);
1825 a_costs = COSTS (total_allocno_costs, a_num)->cost;
1826 p_costs = COSTS (total_allocno_costs, parent_a_num)->cost;
1827 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1828 {
1829 add_cost = a_costs[k];
1830 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1831 p_costs[k] = INT_MAX;
1832 else
1833 p_costs[k] += add_cost;
1834 }
1835 add_cost = COSTS (total_allocno_costs, a_num)->mem_cost;
1836 if (add_cost > 0
1837 && (INT_MAX - add_cost
1838 < COSTS (total_allocno_costs,
1839 parent_a_num)->mem_cost))
1840 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1841 = INT_MAX;
1842 else
1843 COSTS (total_allocno_costs, parent_a_num)->mem_cost
1844 += add_cost;
1845
1846 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1847 COSTS (total_allocno_costs, parent_a_num)->mem_cost = 0;
1848 }
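              /* Add this allocno's own costs into the totals for the
                 regno, saturating at INT_MAX.  */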
1849 a_costs = COSTS (costs, a_num)->cost;
1850 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1851 {
1852 add_cost = a_costs[k];
1853 if (add_cost > 0 && INT_MAX - add_cost < i_costs[k])
1854 i_costs[k] = INT_MAX;
1855 else
1856 i_costs[k] += add_cost;
1857 }
1858 add_cost = COSTS (costs, a_num)->mem_cost;
1859 if (add_cost > 0 && INT_MAX - add_cost < i_mem_cost)
1860 i_mem_cost = INT_MAX;
1861 else
1862 i_mem_cost += add_cost;
1863 }
1864 }
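          /* Adjust the memory cost: pseudos in the moveable range get zero
             memory cost, and a recorded equivalence gain makes memory
             cheaper (positive gain) or more expensive (negative gain).  */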
1865 if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1866 i_mem_cost = 0;
1867 else if (equiv_savings < 0)
1868 i_mem_cost = -equiv_savings;
1869 else if (equiv_savings > 0)
1870 {
1871 i_mem_cost = 0;
1872 for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1873 i_costs[k] += equiv_savings;
1874 }
1875
1876 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1877 best = ALL_REGS;
1878 alt_class = NO_REGS;
1879 /* Find best common class for all allocnos with the same
1880 regno. */
1881 for (k = 0; k < cost_classes_ptr->num; k++)
1882 {
1883 rclass = cost_classes[k];
1884 if (i_costs[k] < best_cost)
1885 {
1886 best_cost = i_costs[k];
1887 best = (enum reg_class) rclass;
1888 }
1889 else if (i_costs[k] == best_cost)
1890 best = ira_reg_class_subunion[best][rclass];
1891 if (pass == flag_expensive_optimizations
1892 /* We still prefer registers to memory even at this
1893 stage if their costs are the same. We will make
1894 a final decision during assigning hard registers
1895 when we have all info including more accurate
1896 costs which might be affected by assigning hard
1897 registers to other pseudos because the pseudos
1898 involved in moves can be coalesced. */
1899 && i_costs[k] <= i_mem_cost
1900 && (reg_class_size[reg_class_subunion[alt_class][rclass]]
1901 > reg_class_size[alt_class]))
1902 alt_class = reg_class_subunion[alt_class][rclass];
1903 }
1904 alt_class = ira_allocno_class_translate[alt_class];
1905 if (best_cost > i_mem_cost
1906 && ! non_spilled_static_chain_regno_p (i))
1907 regno_aclass[i] = NO_REGS;
1908 else if (!optimize && !targetm.class_likely_spilled_p (best))
1909 /* Registers in the alternative class are likely to need
1910 longer or slower sequences than registers in the best class.
1911 When optimizing we make some effort to use the best class
1912 over the alternative class where possible, but at -O0 we
1913 effectively give the alternative class equal weight.
1914 We then run the risk of using slower alternative registers
1915 when plenty of registers from the best class are still free.
1916 This is especially true because live ranges tend to be very
1917 short in -O0 code and so register pressure tends to be low.
1918
1919 Avoid that by ignoring the alternative class if the best
1920 class has plenty of registers.
1921
1922    The union class arrays give important classes, and only
1923    some of those are allocno classes.  So translate them into
1924    allocno classes.  */
1925 regno_aclass[i] = ira_allocno_class_translate[best];
1926 else
1927 {
1928 /* Make the common class the biggest class of best and
1929 alt_class. Translate the common class into an
1930 allocno class too. */
1931 regno_aclass[i] = (ira_allocno_class_translate
1932 [ira_reg_class_superunion[best][alt_class]]);
1933 ira_assert (regno_aclass[i] != NO_REGS
1934 && ira_reg_allocno_class_p[regno_aclass[i]]);
1935 }
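          /* Give the target a chance to override the chosen allocno class;
             if the new class is a subset of BEST or ALT_CLASS, narrow those
             to it as well.  */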
1936 if ((new_class
1937 = (reg_class) (targetm.ira_change_pseudo_allocno_class
1938 (i, regno_aclass[i], best))) != regno_aclass[i])
1939 {
1940 regno_aclass[i] = new_class;
1941 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1942 reg_class_contents[best]))
1943 best = new_class;
1944 if (hard_reg_set_subset_p (reg_class_contents[new_class],
1945 reg_class_contents[alt_class]))
1946 alt_class = new_class;
1947 }
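          /* On the final pass record the preferred, alternative and allocno
             classes of the pseudo.  */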
1948 if (pass == flag_expensive_optimizations)
1949 {
1950 if (best_cost > i_mem_cost
1951 /* Do not assign NO_REGS to static chain pointer
1952 pseudo when non-local goto is used. */
1953 && ! non_spilled_static_chain_regno_p (i))
1954 best = alt_class = NO_REGS;
1955 else if (best == alt_class)
1956 alt_class = NO_REGS;
1957 setup_reg_classes (i, best, alt_class, regno_aclass[i]);
1958 if ((!allocno_p || internal_flag_ira_verbose > 2)
1959 && dump_file != NULL)
1960 fprintf (dump_file,
1961 " r%d: preferred %s, alternative %s, allocno %s\n",
1962 i, reg_class_names[best], reg_class_names[alt_class],
1963 reg_class_names[regno_aclass[i]]);
1964 }
1965 regno_best_class[i] = best;
1966 if (! allocno_p)
1967 {
1968 pref[i] = (best_cost > i_mem_cost
1969 && ! non_spilled_static_chain_regno_p (i)
1970 ? NO_REGS : best);
1971 continue;
1972 }
1973 for (a = ira_regno_allocno_map[i];
1974 a != NULL;
1975 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1976 {
1977 enum reg_class aclass = regno_aclass[i];
1978 int a_num = ALLOCNO_NUM (a);
1979 int *total_a_costs = COSTS (total_allocno_costs, a_num)->cost;
1980 int *a_costs = COSTS (costs, a_num)->cost;
1981
1982 if (aclass == NO_REGS)
1983 best = NO_REGS;
1984 else
1985 {
1986 /* Find the best class which is a subset of the common
1987    class.  */
1988 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1989 allocno_cost = best_cost;
1990 best = ALL_REGS;
1991 for (k = 0; k < cost_classes_ptr->num; k++)
1992 {
1993 rclass = cost_classes[k];
1994 if (! ira_class_subset_p[rclass][aclass])
1995 continue;
1996 if (total_a_costs[k] < best_cost)
1997 {
1998 best_cost = total_a_costs[k];
1999 allocno_cost = a_costs[k];
2000 best = (enum reg_class) rclass;
2001 }
2002 else if (total_a_costs[k] == best_cost)
2003 {
2004 best = ira_reg_class_subunion[best][rclass];
2005 allocno_cost = MAX (allocno_cost, a_costs[k]);
2006 }
2007 }
2008 ALLOCNO_CLASS_COST (a) = allocno_cost;
2009 }
2010 if (internal_flag_ira_verbose > 2 && dump_file != NULL
2011 && (pass == 0 || pref[a_num] != best))
2012 {
2013 fprintf (dump_file, " a%d (r%d,", a_num, i);
2014 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
2015 fprintf (dump_file, "b%d", bb->index);
2016 else
2017 fprintf (dump_file, "l%d",
2018 ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
2019 fprintf (dump_file, ") best %s, allocno %s\n",
2020 reg_class_names[best],
2021 reg_class_names[aclass]);
2022 }
2023 pref[a_num] = best;
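              /* If BEST can hold the allocno in only one way (its mode needs
                 at least as many registers as BEST provides), record an
                 explicit preference for BEST's first hard register and give
                 every cost class contained in BEST the cost recorded for the
                 allocno class.  */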
2024 if (pass == flag_expensive_optimizations && best != aclass
2025 && ira_class_hard_regs_num[best] > 0
2026 && (ira_reg_class_max_nregs[best][ALLOCNO_MODE (a)]
2027 >= ira_class_hard_regs_num[best]))
2028 {
2029 int ind = cost_classes_ptr->index[aclass];
2030
2031 ira_assert (ind >= 0);
2032 ira_init_register_move_cost_if_necessary (ALLOCNO_MODE (a));
2033 ira_add_allocno_pref (a, ira_class_hard_regs[best][0],
2034 (a_costs[ind] - ALLOCNO_CLASS_COST (a))
2035 / (ira_register_move_cost
2036 [ALLOCNO_MODE (a)][best][aclass]));
2037 for (k = 0; k < cost_classes_ptr->num; k++)
2038 if (ira_class_subset_p[cost_classes[k]][best])
2039 a_costs[k] = a_costs[ind];
2040 }
2041 }
2042 }
2043
2044 if (internal_flag_ira_verbose > 4 && dump_file)
2045 {
2046 if (allocno_p)
2047 print_allocno_costs (dump_file);
2048 else
2049 print_pseudo_costs (dump_file);
2050 fprintf (dump_file,"\n");
2051 }
2052 }
2053 ira_free (regno_best_class);
2054 }
2055
2056 \f
2057
2058 /* Process moves involving hard regs to modify allocno hard register
2059    costs.  We can do this only after determining the allocno classes.  If
2060    a hard register forms a register class on its own, then moves with that
2061    hard register are already taken into account in the class costs for the
2062    allocno.  */
2063 static void
2064 process_bb_node_for_hard_reg_moves (ira_loop_tree_node_t loop_tree_node)
2065 {
2066 int i, freq, src_regno, dst_regno, hard_regno, a_regno;
2067 bool to_p;
2068 ira_allocno_t a, curr_a;
2069 ira_loop_tree_node_t curr_loop_tree_node;
2070 enum reg_class rclass;
2071 basic_block bb;
2072 rtx_insn *insn;
2073 rtx set, src, dst;
2074
2075 bb = loop_tree_node->bb;
2076 if (bb == NULL)
2077 return;
2078 freq = REG_FREQ_FROM_BB (bb);
2079 if (freq == 0)
2080 freq = 1;
2081 FOR_BB_INSNS (bb, insn)
2082 {
2083 if (!NONDEBUG_INSN_P (insn))
2084 continue;
2085 set = single_set (insn);
2086 if (set == NULL_RTX)
2087 continue;
2088 dst = SET_DEST (set);
2089 src = SET_SRC (set);
2090 if (! REG_P (dst) || ! REG_P (src))
2091 continue;
2092 dst_regno = REGNO (dst);
2093 src_regno = REGNO (src);
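      /* Only copies between a pseudo and a hard register are interesting
         here; remember which side is the hard register.  */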
2094 if (dst_regno >= FIRST_PSEUDO_REGISTER
2095 && src_regno < FIRST_PSEUDO_REGISTER)
2096 {
2097 hard_regno = src_regno;
2098 a = ira_curr_regno_allocno_map[dst_regno];
2099 to_p = true;
2100 }
2101 else if (src_regno >= FIRST_PSEUDO_REGISTER
2102 && dst_regno < FIRST_PSEUDO_REGISTER)
2103 {
2104 hard_regno = dst_regno;
2105 a = ira_curr_regno_allocno_map[src_regno];
2106 to_p = false;
2107 }
2108 else
2109 continue;
2110 if (reg_class_size[(int) REGNO_REG_CLASS (hard_regno)]
2111 == (ira_reg_class_max_nregs
2112 [REGNO_REG_CLASS (hard_regno)][(int) ALLOCNO_MODE(a)]))
2113 /* If the class can provide only one hard reg to the allocno,
2114    we already processed this insn in record_operand_costs and
2115    actually updated the hard reg cost there.  */
2116 continue;
2117 rclass = ALLOCNO_CLASS (a);
2118 if (! TEST_HARD_REG_BIT (reg_class_contents[rclass], hard_regno))
2119 continue;
2120 i = ira_class_hard_reg_index[rclass][hard_regno];
2121 if (i < 0)
2122 continue;
2123 a_regno = ALLOCNO_REGNO (a);
2124 for (curr_loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2125 curr_loop_tree_node != NULL;
2126 curr_loop_tree_node = curr_loop_tree_node->parent)
2127 if ((curr_a = curr_loop_tree_node->regno_allocno_map[a_regno]) != NULL)
2128 ira_add_allocno_pref (curr_a, hard_regno, freq);
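      /* Make the hard register cheaper for the allocno: subtract the
         register move cost, scaled by the block frequency, from the
         allocno's hard register and conflict costs.  */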
2129 {
2130 int cost;
2131 enum reg_class hard_reg_class;
2132 machine_mode mode;
2133
2134 mode = ALLOCNO_MODE (a);
2135 hard_reg_class = REGNO_REG_CLASS (hard_regno);
2136 ira_init_register_move_cost_if_necessary (mode);
2137 cost = (to_p ? ira_register_move_cost[mode][hard_reg_class][rclass]
2138 : ira_register_move_cost[mode][rclass][hard_reg_class]) * freq;
2139 ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), rclass,
2140 ALLOCNO_CLASS_COST (a));
2141 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2142 rclass, 0);
2143 ALLOCNO_HARD_REG_COSTS (a)[i] -= cost;
2144 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[i] -= cost;
2145 ALLOCNO_CLASS_COST (a) = MIN (ALLOCNO_CLASS_COST (a),
2146 ALLOCNO_HARD_REG_COSTS (a)[i]);
2147 }
2148 }
2149 }
2150
2151 /* After we have found the hard register and memory costs for the
2152    allocnos, define their classes and modify the hard register costs
2153    to account for insns moving allocnos to/from hard registers.  */
2154 static void
2155 setup_allocno_class_and_costs (void)
2156 {
2157 int i, j, n, regno, hard_regno, num;
2158 int *reg_costs;
2159 enum reg_class aclass, rclass;
2160 ira_allocno_t a;
2161 ira_allocno_iterator ai;
2162 cost_classes_t cost_classes_ptr;
2163
2164 ira_assert (allocno_p);
2165 FOR_EACH_ALLOCNO (a, ai)
2166 {
2167 i = ALLOCNO_NUM (a);
2168 regno = ALLOCNO_REGNO (a);
2169 aclass = regno_aclass[regno];
2170 cost_classes_ptr = regno_cost_classes[regno];
2171 ira_assert (pref[i] == NO_REGS || aclass != NO_REGS);
2172 ALLOCNO_MEMORY_COST (a) = COSTS (costs, i)->mem_cost;
2173 ira_set_allocno_class (a, aclass);
2174 if (aclass == NO_REGS)
2175 continue;
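      /* When the preferred class differs from the allocno class, set up
         individual hard register costs: a register in the preferred class
         gets the class cost, any other register gets the cost recorded for
         its own class.  */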
2176 if (optimize && ALLOCNO_CLASS (a) != pref[i])
2177 {
2178 n = ira_class_hard_regs_num[aclass];
2179 ALLOCNO_HARD_REG_COSTS (a)
2180 = reg_costs = ira_allocate_cost_vector (aclass);
2181 for (j = n - 1; j >= 0; j--)
2182 {
2183 hard_regno = ira_class_hard_regs[aclass][j];
2184 if (TEST_HARD_REG_BIT (reg_class_contents[pref[i]], hard_regno))
2185 reg_costs[j] = ALLOCNO_CLASS_COST (a);
2186 else
2187 {
2188 rclass = REGNO_REG_CLASS (hard_regno);
2189 num = cost_classes_ptr->index[rclass];
2190 if (num < 0)
2191 {
2192 num = cost_classes_ptr->hard_regno_index[hard_regno];
2193 ira_assert (num >= 0);
2194 }
2195 reg_costs[j] = COSTS (costs, i)->cost[num];
2196 }
2197 }
2198 }
2199 }
2200 if (optimize)
2201 ira_traverse_loop_tree (true, ira_loop_tree_root,
2202 process_bb_node_for_hard_reg_moves, NULL);
2203 }
2204
2205 \f
2206
2207 /* Function called once during compiler work; it clears the cost vector pointers.  */
2208 void
2209 ira_init_costs_once (void)
2210 {
2211 int i;
2212
2213 init_cost = NULL;
2214 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2215 {
2216 op_costs[i] = NULL;
2217 this_op_costs[i] = NULL;
2218 }
2219 temp_costs = NULL;
2220 }
2221
2222 /* Free allocated temporary cost vectors. */
2223 void
2224 target_ira_int::free_ira_costs ()
2225 {
2226 int i;
2227
2228 free (x_init_cost);
2229 x_init_cost = NULL;
2230 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2231 {
2232 free (x_op_costs[i]);
2233 free (x_this_op_costs[i]);
2234 x_op_costs[i] = x_this_op_costs[i] = NULL;
2235 }
2236 free (x_temp_costs);
2237 x_temp_costs = NULL;
2238 }
2239
2240 /* This is called each time register related information is
2241 changed. */
2242 void
2243 ira_init_costs (void)
2244 {
2245 int i;
2246
2247 this_target_ira_int->free_ira_costs ();
2248 max_struct_costs_size
2249 = sizeof (struct costs) + sizeof (int) * (ira_important_classes_num - 1);
2250 /* Don't use ira_allocate because these vectors live through several
2251    IRA calls.  */
2252 init_cost = (struct costs *) xmalloc (max_struct_costs_size);
2253 init_cost->mem_cost = 1000000;
2254 for (i = 0; i < ira_important_classes_num; i++)
2255 init_cost->cost[i] = 1000000;
2256 for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2257 {
2258 op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2259 this_op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2260 }
2261 temp_costs = (struct costs *) xmalloc (max_struct_costs_size);
2262 }
2263
2264 \f
2265
2266 /* Common initialization function for ira_costs and
2267 ira_set_pseudo_classes. */
2268 static void
2269 init_costs (void)
2270 {
2271 init_subregs_of_mode ();
2272 costs = (struct costs *) ira_allocate (max_struct_costs_size
2273 * cost_elements_num);
2274 pref_buffer = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2275 * cost_elements_num);
2276 regno_aclass = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2277 * max_reg_num ());
2278 regno_equiv_gains = (int *) ira_allocate (sizeof (int) * max_reg_num ());
2279 memset (regno_equiv_gains, 0, sizeof (int) * max_reg_num ());
2280 }
2281
2282 /* Common finalization function for ira_costs and
2283 ira_set_pseudo_classes. */
2284 static void
2285 finish_costs (void)
2286 {
2287 finish_subregs_of_mode ();
2288 ira_free (regno_equiv_gains);
2289 ira_free (regno_aclass);
2290 ira_free (pref_buffer);
2291 ira_free (costs);
2292 }
2293
2294 /* Entry function which defines register class, memory and hard
2295 register costs for each allocno. */
2296 void
2297 ira_costs (void)
2298 {
2299 allocno_p = true;
2300 cost_elements_num = ira_allocnos_num;
2301 init_costs ();
2302 total_allocno_costs = (struct costs *) ira_allocate (max_struct_costs_size
2303 * ira_allocnos_num);
2304 initiate_regno_cost_classes ();
2305 calculate_elim_costs_all_insns ();
2306 find_costs_and_classes (ira_dump_file);
2307 setup_allocno_class_and_costs ();
2308 finish_regno_cost_classes ();
2309 finish_costs ();
2310 ira_free (total_allocno_costs);
2311 }
2312
2313 /* Entry function which defines classes for pseudos.
2314 Set pseudo_classes_defined_p only if DEFINE_PSEUDO_CLASSES is true. */
2315 void
2316 ira_set_pseudo_classes (bool define_pseudo_classes, FILE *dump_file)
2317 {
2318 allocno_p = false;
2319 internal_flag_ira_verbose = flag_ira_verbose;
2320 cost_elements_num = max_reg_num ();
2321 init_costs ();
2322 initiate_regno_cost_classes ();
2323 find_costs_and_classes (dump_file);
2324 finish_regno_cost_classes ();
2325 if (define_pseudo_classes)
2326 pseudo_classes_defined_p = true;
2327
2328 finish_costs ();
2329 }
2330
2331 \f
2332
2333 /* Change hard register costs for allocnos which live through
2334    function calls.  This is called only after we have found all the
2335    calls crossed by the allocnos while building allocno live ranges.  */
2336 void
2337 ira_tune_allocno_costs (void)
2338 {
2339 int j, n, regno;
2340 int cost, min_cost, *reg_costs;
2341 enum reg_class aclass, rclass;
2342 machine_mode mode;
2343 ira_allocno_t a;
2344 ira_allocno_iterator ai;
2345 ira_allocno_object_iterator oi;
2346 ira_object_t obj;
2347 bool skip_p;
2348 HARD_REG_SET *crossed_calls_clobber_regs;
2349
2350 FOR_EACH_ALLOCNO (a, ai)
2351 {
2352 aclass = ALLOCNO_CLASS (a);
2353 if (aclass == NO_REGS)
2354 continue;
2355 mode = ALLOCNO_MODE (a);
2356 n = ira_class_hard_regs_num[aclass];
2357 min_cost = INT_MAX;
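      /* If the allocno crosses calls that are not cheap, penalize
         call-clobbered hard registers by the estimated cost of saving and
         restoring the register around those calls.  */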
2358 if (ALLOCNO_CALLS_CROSSED_NUM (a)
2359 != ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
2360 {
2361 ira_allocate_and_set_costs
2362 (&ALLOCNO_HARD_REG_COSTS (a), aclass,
2363 ALLOCNO_CLASS_COST (a));
2364 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2365 for (j = n - 1; j >= 0; j--)
2366 {
2367 regno = ira_class_hard_regs[aclass][j];
2368 skip_p = false;
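          /* Skip hard registers that already conflict with one of the
             allocno's objects.  */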
2369 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2370 {
2371 if (ira_hard_reg_set_intersection_p (regno, mode,
2372 OBJECT_CONFLICT_HARD_REGS
2373 (obj)))
2374 {
2375 skip_p = true;
2376 break;
2377 }
2378 }
2379 if (skip_p)
2380 continue;
2381 rclass = REGNO_REG_CLASS (regno);
2382 cost = 0;
2383 crossed_calls_clobber_regs
2384 = &(ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
2385 if (ira_hard_reg_set_intersection_p (regno, mode,
2386 *crossed_calls_clobber_regs)
2387 && (ira_hard_reg_set_intersection_p (regno, mode,
2388 call_used_reg_set)
2389 || targetm.hard_regno_call_part_clobbered (NULL, regno,
2390 mode)))
2391 cost += (ALLOCNO_CALL_FREQ (a)
2392 * (ira_memory_move_cost[mode][rclass][0]
2393 + ira_memory_move_cost[mode][rclass][1]));
2394 #ifdef IRA_HARD_REGNO_ADD_COST_MULTIPLIER
2395 cost += ((ira_memory_move_cost[mode][rclass][0]
2396 + ira_memory_move_cost[mode][rclass][1])
2397 * ALLOCNO_FREQ (a)
2398 * IRA_HARD_REGNO_ADD_COST_MULTIPLIER (regno) / 2);
2399 #endif
2400 if (INT_MAX - cost < reg_costs[j])
2401 reg_costs[j] = INT_MAX;
2402 else
2403 reg_costs[j] += cost;
2404 if (min_cost > reg_costs[j])
2405 min_cost = reg_costs[j];
2406 }
2407 }
2408 if (min_cost != INT_MAX)
2409 ALLOCNO_CLASS_COST (a) = min_cost;
2410
2411 /* Some targets allow pseudos to be allocated to unaligned sequences
2412 of hard registers. However, selecting an unaligned sequence can
2413 unnecessarily restrict later allocations. So increase the cost of
2414 unaligned hard regs to encourage the use of aligned hard regs. */
2415 {
2416 const int nregs = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
2417
2418 if (nregs > 1)
2419 {
2420 ira_allocate_and_set_costs
2421 (&ALLOCNO_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a));
2422 reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2423 for (j = n - 1; j >= 0; j--)
2424 {
2425 regno = ira_non_ordered_class_hard_regs[aclass][j];
2426 if ((regno % nregs) != 0)
2427 {
2428 int index = ira_class_hard_reg_index[aclass][regno];
2429 ira_assert (index != -1);
2430 reg_costs[index] += ALLOCNO_FREQ (a);
2431 }
2432 }
2433 }
2434 }
2435 }
2436 }
2437
2438 /* Add COST to the estimated gain for eliminating REGNO with its
2439 equivalence. If COST is zero, record that no such elimination is
2440 possible. */
2441
2442 void
2443 ira_adjust_equiv_reg_cost (unsigned regno, int cost)
2444 {
2445 if (cost == 0)
2446 regno_equiv_gains[regno] = 0;
2447 else
2448 regno_equiv_gains[regno] += cost;
2449 }
2450
2451 void
2452 ira_costs_c_finalize (void)
2453 {
2454 this_target_ira_int->free_ira_costs ();
2455 }