1 /* Integrated Register Allocator (IRA) intercommunication header file.
2 Copyright (C) 2006, 2007, 2008
3 Free Software Foundation, Inc.
4 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "cfgloop.h"
23 #include "ira.h"
24 #include "alloc-pool.h"
25
26 /* To provide consistency in naming, all IRA external variables,
27 functions, common typedefs start with prefix ira_. */
28
29 #ifdef ENABLE_CHECKING
30 #define ENABLE_IRA_CHECKING
31 #endif
32
33 #ifdef ENABLE_IRA_CHECKING
34 #define ira_assert(c) gcc_assert (c)
35 #else
36 #define ira_assert(c)
37 #endif
38
39 /* Compute register frequency from edge frequency FREQ. It is
40 analogous to REG_FREQ_FROM_BB. When optimizing for size, or when
41 profile driven feedback is available and the function is never
42 executed, the result is always REG_FREQ_MAX. Otherwise rescale the
43 edge frequency. */
44 #define REG_FREQ_FROM_EDGE_FREQ(freq) \
45 (optimize_size || (flag_branch_probabilities && !ENTRY_BLOCK_PTR->count) \
46 ? REG_FREQ_MAX : (freq * REG_FREQ_MAX / BB_FREQ_MAX) \
47 ? (freq * REG_FREQ_MAX / BB_FREQ_MAX) : 1)
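/* For example: an edge frequency of BB_FREQ_MAX rescales to
   REG_FREQ_MAX, an edge frequency of BB_FREQ_MAX / 2 rescales to
   roughly REG_FREQ_MAX / 2, and any nonzero edge frequency whose
   scaled value truncates to zero is clamped to 1.  */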
48
49 /* All natural loops. */
50 extern struct loops ira_loops;
51
52 /* A modified value of flag `-fira-verbose' used internally. */
53 extern int internal_flag_ira_verbose;
54
55 /* Dump file of the allocator if it is not NULL. */
56 extern FILE *ira_dump_file;
57
58 /* Typedefs for pointers to allocno live range, allocno, and copy of
59 allocnos. */
60 typedef struct ira_allocno_live_range *allocno_live_range_t;
61 typedef struct ira_allocno *ira_allocno_t;
62 typedef struct ira_allocno_copy *ira_copy_t;
63
64 /* Definition of vector of allocnos and copies. */
65 DEF_VEC_P(ira_allocno_t);
66 DEF_VEC_ALLOC_P(ira_allocno_t, heap);
67 DEF_VEC_P(ira_copy_t);
68 DEF_VEC_ALLOC_P(ira_copy_t, heap);
69
70 /* Typedef for pointer to the subsequent structure. */
71 typedef struct ira_loop_tree_node *ira_loop_tree_node_t;
72
73 /* In the general case, IRA is a regional allocator. The regions are
74 nested and form a tree. Currently regions are natural loops. The
75 following structure describes a loop tree node (representing a basic
76 block or a loop). We need such a tree because the loop tree from
77 cfgloop.h is not convenient for the optimization: basic blocks are
78 not a part of the tree from cfgloop.h. We also use the nodes for
79 storing additional information about basic blocks/loops for
80 register allocation purposes. */
81 struct ira_loop_tree_node
82 {
83 /* The node represents basic block if children == NULL. */
84 basic_block bb; /* NULL for loop. */
85 struct loop *loop; /* NULL for BB. */
86 /* The next (loop) node with the same parent. SUBLOOP_NEXT is
87 always NULL for BBs. */
88 ira_loop_tree_node_t subloop_next, next;
89 /* The first (loop) node immediately inside the node. SUBLOOPS is
90 always NULL for BBs. */
91 ira_loop_tree_node_t subloops, children;
92 /* The node immediately containing given node. */
93 ira_loop_tree_node_t parent;
94
95 /* Loop level in range [0, ira_loop_tree_height). */
96 int level;
97
98 /* All the following members are defined only for nodes representing
99 loops. */
100
101 /* Allocnos in the loop corresponding to their regnos. If it is
102 NULL the loop does not form a separate register allocation region
103 (e.g. because it has abnormal enter/exit edges and we can not put
104 code for register shuffling on the edges if a different
105 allocation is used for a pseudo-register on different sides of
106 the edges). Caps are not in the map (remember we can have more
107 than one cap with the same regno in a region). */
108 ira_allocno_t *regno_allocno_map;
109
110 /* Maximal register pressure inside loop for given register class
111 (defined only for the cover classes). */
112 int reg_pressure[N_REG_CLASSES];
113
114 /* Numbers of allocnos referred to or living in the loop node (except
115 for its subloops). */
116 bitmap all_allocnos;
117
118 /* Numbers of allocnos living at the loop borders. */
119 bitmap border_allocnos;
120
121 /* Regnos of pseudos modified in the loop node (including its
122 subloops). */
123 bitmap modified_regnos;
124
125 /* Numbers of copies referred to in the corresponding loop. */
126 bitmap local_copies;
127 };
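/* Usage sketch: walking one level of the tree by hand, assuming NODE
   is a loop tree node; a child with a non-NULL bb represents a basic
   block, otherwise it represents a subloop.  The real traversal
   interface is ira_traverse_loop_tree, declared later in this file.

     int bb_num = 0;
     ira_loop_tree_node_t child;

     for (child = node->children; child != NULL; child = child->next)
       if (child->bb != NULL)
         bb_num++;
*/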
128
129 /* The root of the loop tree corresponding to the whole function. */
130 extern ira_loop_tree_node_t ira_loop_tree_root;
131
132 /* Height of the loop tree. */
133 extern int ira_loop_tree_height;
134
135 /* All nodes representing basic blocks are accessed through the
136 following array. We cannot use the basic block member `aux' for this
137 because it is used for insertion of insns on edges. */
138 extern ira_loop_tree_node_t ira_bb_nodes;
139
140 /* Two access macros to the nodes representing basic blocks. */
141 #if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
142 #define IRA_BB_NODE_BY_INDEX(index) __extension__ \
143 (({ ira_loop_tree_node_t _node = (&ira_bb_nodes[index]); \
144 if (_node->children != NULL || _node->loop != NULL || _node->bb == NULL)\
145 { \
146 fprintf (stderr, \
147 "\n%s: %d: error in %s: it is not a block node\n", \
148 __FILE__, __LINE__, __FUNCTION__); \
149 gcc_unreachable (); \
150 } \
151 _node; }))
152 #else
153 #define IRA_BB_NODE_BY_INDEX(index) (&ira_bb_nodes[index])
154 #endif
155
156 #define IRA_BB_NODE(bb) IRA_BB_NODE_BY_INDEX ((bb)->index)
157
158 /* All nodes representing loops are accessed through the following
159 array. */
160 extern ira_loop_tree_node_t ira_loop_nodes;
161
162 /* Two access macros to the nodes representing loops. */
163 #if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
164 #define IRA_LOOP_NODE_BY_INDEX(index) __extension__ \
165 (({ ira_loop_tree_node_t const _node = (&ira_loop_nodes[index]);\
166 if (_node->children == NULL || _node->bb != NULL || _node->loop == NULL)\
167 { \
168 fprintf (stderr, \
169 "\n%s: %d: error in %s: it is not a loop node\n", \
170 __FILE__, __LINE__, __FUNCTION__); \
171 gcc_unreachable (); \
172 } \
173 _node; }))
174 #else
175 #define IRA_LOOP_NODE_BY_INDEX(index) (&ira_loop_nodes[index])
176 #endif
177
178 #define IRA_LOOP_NODE(loop) IRA_LOOP_NODE_BY_INDEX ((loop)->num)
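/* Usage sketch: mapping CFG objects to their tree nodes, assuming BB
   is a live basic_block, LOOP a struct loop *, and GENERAL_REGS one of
   the target's cover classes.

     ira_loop_tree_node_t bb_node = IRA_BB_NODE (bb);
     ira_loop_tree_node_t loop_node = IRA_LOOP_NODE (loop);
     int pressure = loop_node->reg_pressure[GENERAL_REGS];
*/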
179
180 \f
181
182 /* The structure describes program points where a given allocno lives.
183 To save memory we store allocno conflicts only for allocnos of the
184 same cover class, which is enough to assign hard registers. To find
185 conflicts for other allocnos (e.g. to assign stack memory slots) we
186 use the live ranges. If the live ranges of two allocnos
187 intersect, the allocnos are in conflict. */
188 struct ira_allocno_live_range
189 {
190 /* Allocno whose live range is described by given structure. */
191 ira_allocno_t allocno;
192 /* Program point range. */
193 int start, finish;
194 /* Next structure describing program points where the allocno
195 lives. */
196 allocno_live_range_t next;
197 /* Pointer to structures with the same start/finish. */
198 allocno_live_range_t start_next, finish_next;
199 };
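/* Usage sketch: the intersection test described above for two such
   lists, assuming both are kept ordered as documented for
   ALLOCNO_LIVE_RANGES below.  This is roughly what
   ira_allocno_live_ranges_intersect_p, declared later in this file,
   does.

     static bool
     live_ranges_intersect_p (allocno_live_range_t r1,
                              allocno_live_range_t r2)
     {
       while (r1 != NULL && r2 != NULL)
         {
           if (r1->start > r2->finish)
             r1 = r1->next;
           else if (r2->start > r1->finish)
             r2 = r2->next;
           else
             return true;
         }
       return false;
     }
*/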
200
201 /* Program points are enumerated by numbers from range
202 0..IRA_MAX_POINT-1. There are approximately two times more program
203 points than insns. Program points are places in the program where
204 liveness info can change. In the most general case (there are more
205 complicated cases too) some program points correspond to places
206 where an input operand dies and others correspond to places where
207 output operands are born. */
208 extern int ira_max_point;
209
210 /* Arrays of size IRA_MAX_POINT mapping a program point to the allocno
211 live ranges with given start/finish point. */
212 extern allocno_live_range_t *ira_start_point_ranges, *ira_finish_point_ranges;
213
214 /* A structure representing an allocno (allocation entity). An allocno
215 represents a pseudo-register in an allocation region. If a
216 pseudo-register does not live in a region but lives in the
217 nested regions, it is represented in the region by a special allocno
218 called a *cap*. There may be more than one cap representing the same
219 pseudo-register in a region. This means that the corresponding
220 pseudo-register lives in more than one non-intersecting subregion. */
221 struct ira_allocno
222 {
223 /* The allocno order number starting with 0. Each allocno has a
224 unique number and the number is never changed for the
225 allocno. */
226 int num;
227 /* Regno for allocno or cap. */
228 int regno;
229 /* Mode of the allocno which is the mode of the corresponding
230 pseudo-register. */
231 enum machine_mode mode;
232 /* Final rtx representation of the allocno. */
233 rtx reg;
234 /* Hard register assigned to given allocno. Negative value means
235 that memory was allocated to the allocno. During the reload, a
236 spilled allocno has a value equal to the corresponding stack slot
237 number (0, ...) - 2. Value -1 is used for allocnos spilled by the
238 reload (at this point a pseudo-register has only one allocno) which
239 did not get a stack slot yet. */
240 int hard_regno;
241 /* Allocnos with the same regno are linked by the following member.
242 Allocnos corresponding to inner loops are first in the list (it
243 corresponds to depth-first traversal of the loops). */
244 ira_allocno_t next_regno_allocno;
245 /* There may be different allocnos with the same regno in different
246 regions. Allocnos are bound to the corresponding loop tree node.
247 Pseudo-register may have only one regular allocno with given loop
248 tree node but more than one cap (see comments above). */
249 ira_loop_tree_node_t loop_tree_node;
250 /* Accumulated usage references of the allocno. Here and below,
251 word 'accumulated' means info for given region and all nested
252 subregions. In this case, 'accumulated' means sum of references
253 of the corresponding pseudo-register in this region and in all
254 nested subregions recursively. */
255 int nrefs;
256 /* Accumulated frequency of usage of the allocno. */
257 int freq;
258 /* Register class which should be used for allocation for given
259 allocno. NO_REGS means that we should use memory. */
260 enum reg_class cover_class;
261 /* Minimal accumulated cost of using a register of the cover class
262 for the allocno. */
263 int cover_class_cost;
264 /* Minimal accumulated, and updated costs of memory for the allocno.
265 At the allocation start, the original and updated costs are
266 equal. The updated cost may be changed after finishing
267 allocation in a region and starting allocation in a subregion.
268 The change reflects the cost of spill/restore code on the
269 subregion border if we assign memory to the pseudo in the
270 subregion. */
271 int memory_cost, updated_memory_cost;
272 /* Accumulated number of points where the allocno lives and there is
273 excess pressure for its class. Excess pressure for a register
274 class at some point means that there are more allocnos of given
275 register class living at the point than number of hard-registers
276 of the class available for the allocation. */
277 int excess_pressure_points_num;
278 /* Copies to other non-conflicting allocnos. The copies can
279 represent move insn or potential move insn usually because of two
280 operand insn constraints. */
281 ira_copy_t allocno_copies;
282 /* An allocno (cap) representing the given allocno on the upper loop
283 tree level. */
284 ira_allocno_t cap;
285 /* A link to the allocno (cap) on the lower loop level represented by
286 the given cap. NULL if the given allocno is not a cap. */
287 ira_allocno_t cap_member;
288 /* Coalesced allocnos form a cyclic list. One allocno given by
289 FIRST_COALESCED_ALLOCNO represents all coalesced allocnos. The
290 list is chained by NEXT_COALESCED_ALLOCNO. */
291 ira_allocno_t first_coalesced_allocno;
292 ira_allocno_t next_coalesced_allocno;
293 /* Pointer to structures describing at what program point the
294 allocno lives. We always maintain the list in such a way that *the
295 ranges in the list do not intersect and are ordered by decreasing
296 program points*. */
297 allocno_live_range_t live_ranges;
298 /* Before building conflicts the two member values are
299 correspondingly minimal and maximal points of the accumulated
300 allocno live ranges. After building conflicts the values are
301 correspondingly minimal and maximal conflict ids of allocnos with
302 which given allocno can conflict. */
303 int min, max;
304 /* The unique member value represents given allocno in conflict bit
305 vectors. */
306 int conflict_id;
307 /* Vector of accumulated conflicting allocnos with NULL end marker
308 (if CONFLICT_VEC_P is true) or conflict bit vector otherwise.
309 Only allocnos with the same cover class are in the vector or in
310 the bit vector. */
311 void *conflict_allocno_array;
312 /* Allocated size of the previous array. */
313 unsigned int conflict_allocno_array_size;
314 /* Number of accumulated conflicts in the vector of conflicting
315 allocnos. */
316 int conflict_allocnos_num;
317 /* Initial and accumulated hard registers conflicting with this
318 allocno and as a consequence cannot be assigned to the allocno.
319 All non-allocatable hard regs and hard regs of cover classes
320 different from the given allocno's cover class are included in the sets. */
321 HARD_REG_SET conflict_hard_regs, total_conflict_hard_regs;
322 /* Accumulated frequency of calls which given allocno
323 intersects. */
324 int call_freq;
325 /* Number of the calls which the given allocno intersects (crosses). */
326 int calls_crossed_num;
327 /* Non-NULL if we removed the restoring of the value from the given
328 allocno to MEM_OPTIMIZED_DEST at loop exit (see ira-emit.c) because the
329 allocno value is not changed inside the loop. */
330 ira_allocno_t mem_optimized_dest;
331 /* TRUE if the allocno assigned to memory was a destination of
332 removed move (see ira-emit.c) at loop exit because the value of
333 the corresponding pseudo-register is not changed inside the
334 loop. */
335 unsigned int mem_optimized_dest_p : 1;
336 /* TRUE if the corresponding pseudo-register has disjoint live
337 ranges and the other allocnos of the pseudo-register except this
338 one changed REG. */
339 unsigned int somewhere_renamed_p : 1;
340 /* TRUE if allocno with the same REGNO in a subregion has been
341 renamed, in other words, got a new pseudo-register. */
342 unsigned int child_renamed_p : 1;
343 /* During the reload, value TRUE means that we should not reassign a
344 hard register to the allocno that got memory earlier. It is set up
345 when we remove a memory-memory move insn before each iteration of
346 the reload. */
347 unsigned int dont_reassign_p : 1;
348 #ifdef STACK_REGS
349 /* Set to TRUE if the allocno can't be assigned to a stack hard
350 register, correspondingly in this region and in the area including the
351 region and all its subregions recursively. */
352 unsigned int no_stack_reg_p : 1, total_no_stack_reg_p : 1;
353 #endif
354 /* TRUE value means that the allocno was not removed yet from the
355 conflict graph during coloring. */
356 unsigned int in_graph_p : 1;
357 /* TRUE if a hard register or memory has been assigned to the
358 allocno. */
359 unsigned int assigned_p : 1;
360 /* TRUE if it is put on the stack to make other allocnos
361 colorable. */
362 unsigned int may_be_spilled_p : 1;
363 /* TRUE if the allocno was removed from the splay tree used to
364 choose an allocno for spilling (see ira-color.c). */
365 unsigned int splay_removed_p : 1;
366 /* TRUE if conflicts for given allocno are represented by vector of
367 pointers to the conflicting allocnos. Otherwise, we use a bit
368 vector where a bit with given index represents allocno with the
369 same number. */
370 unsigned int conflict_vec_p : 1;
371 /* Array of usage costs (accumulated and the one updated during
372 coloring) for each hard register of the allocno cover class. The
373 member value can be NULL if all costs are the same and equal to
374 COVER_CLASS_COST. For example, the costs of two different hard
375 registers can be different if one hard register is callee-saved
376 and another one is callee-used and the allocno lives through
377 calls. Another example is the case when for some insn the
378 corresponding pseudo-register value should be put in specific
379 register class (e.g. AREG for x86) which is a strict subset of
380 the allocno cover class (GENERAL_REGS for x86). We have updated
381 costs to reflect the situation when the usage cost of a hard
382 register is decreased because the allocno is connected to another
383 allocno by a copy and the other allocno has been assigned to
384 the hard register. */
385 int *hard_reg_costs, *updated_hard_reg_costs;
386 /* Array of decreasing costs (accumulated and the one updated during
387 coloring) for allocnos conflicting with the given allocno, indexed by
388 hard register of the allocno cover class. The member value can be NULL
389 if all costs are the same. These costs are used to reflect
390 preferences of other allocnos not yet assigned when assigning
391 to the given allocno. */
392 int *conflict_hard_reg_costs, *updated_conflict_hard_reg_costs;
393 /* Number of allocnos of the same cover class with a TRUE in_graph_p
394 value that conflict with the given allocno at each point of
395 graph coloring. */
396 int left_conflicts_num;
397 /* Number of hard registers of the allocno cover class really
398 available for the allocno allocation. */
399 int available_regs_num;
400 /* Allocnos in a bucket (used in coloring) chained by the following
401 two members. */
402 ira_allocno_t next_bucket_allocno;
403 ira_allocno_t prev_bucket_allocno;
404 /* Used for temporary purposes. */
405 int temp;
406 };
407
408 /* All members of the allocno structures should be accessed only
409 through the following macros. */
410 #define ALLOCNO_NUM(A) ((A)->num)
411 #define ALLOCNO_REGNO(A) ((A)->regno)
412 #define ALLOCNO_REG(A) ((A)->reg)
413 #define ALLOCNO_NEXT_REGNO_ALLOCNO(A) ((A)->next_regno_allocno)
414 #define ALLOCNO_LOOP_TREE_NODE(A) ((A)->loop_tree_node)
415 #define ALLOCNO_CAP(A) ((A)->cap)
416 #define ALLOCNO_CAP_MEMBER(A) ((A)->cap_member)
417 #define ALLOCNO_CONFLICT_ALLOCNO_ARRAY(A) ((A)->conflict_allocno_array)
418 #define ALLOCNO_CONFLICT_ALLOCNO_ARRAY_SIZE(A) \
419 ((A)->conflict_allocno_array_size)
420 #define ALLOCNO_CONFLICT_ALLOCNOS_NUM(A) \
421 ((A)->conflict_allocnos_num)
422 #define ALLOCNO_CONFLICT_HARD_REGS(A) ((A)->conflict_hard_regs)
423 #define ALLOCNO_TOTAL_CONFLICT_HARD_REGS(A) ((A)->total_conflict_hard_regs)
424 #define ALLOCNO_NREFS(A) ((A)->nrefs)
425 #define ALLOCNO_FREQ(A) ((A)->freq)
426 #define ALLOCNO_HARD_REGNO(A) ((A)->hard_regno)
427 #define ALLOCNO_CALL_FREQ(A) ((A)->call_freq)
428 #define ALLOCNO_CALLS_CROSSED_NUM(A) ((A)->calls_crossed_num)
429 #define ALLOCNO_MEM_OPTIMIZED_DEST(A) ((A)->mem_optimized_dest)
430 #define ALLOCNO_MEM_OPTIMIZED_DEST_P(A) ((A)->mem_optimized_dest_p)
431 #define ALLOCNO_SOMEWHERE_RENAMED_P(A) ((A)->somewhere_renamed_p)
432 #define ALLOCNO_CHILD_RENAMED_P(A) ((A)->child_renamed_p)
433 #define ALLOCNO_DONT_REASSIGN_P(A) ((A)->dont_reassign_p)
434 #ifdef STACK_REGS
435 #define ALLOCNO_NO_STACK_REG_P(A) ((A)->no_stack_reg_p)
436 #define ALLOCNO_TOTAL_NO_STACK_REG_P(A) ((A)->total_no_stack_reg_p)
437 #endif
438 #define ALLOCNO_IN_GRAPH_P(A) ((A)->in_graph_p)
439 #define ALLOCNO_ASSIGNED_P(A) ((A)->assigned_p)
440 #define ALLOCNO_MAY_BE_SPILLED_P(A) ((A)->may_be_spilled_p)
441 #define ALLOCNO_SPLAY_REMOVED_P(A) ((A)->splay_removed_p)
442 #define ALLOCNO_CONFLICT_VEC_P(A) ((A)->conflict_vec_p)
443 #define ALLOCNO_MODE(A) ((A)->mode)
444 #define ALLOCNO_COPIES(A) ((A)->allocno_copies)
445 #define ALLOCNO_HARD_REG_COSTS(A) ((A)->hard_reg_costs)
446 #define ALLOCNO_UPDATED_HARD_REG_COSTS(A) ((A)->updated_hard_reg_costs)
447 #define ALLOCNO_CONFLICT_HARD_REG_COSTS(A) \
448 ((A)->conflict_hard_reg_costs)
449 #define ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS(A) \
450 ((A)->updated_conflict_hard_reg_costs)
451 #define ALLOCNO_LEFT_CONFLICTS_NUM(A) ((A)->left_conflicts_num)
452 #define ALLOCNO_COVER_CLASS(A) ((A)->cover_class)
453 #define ALLOCNO_COVER_CLASS_COST(A) ((A)->cover_class_cost)
454 #define ALLOCNO_MEMORY_COST(A) ((A)->memory_cost)
455 #define ALLOCNO_UPDATED_MEMORY_COST(A) ((A)->updated_memory_cost)
456 #define ALLOCNO_EXCESS_PRESSURE_POINTS_NUM(A) ((A)->excess_pressure_points_num)
457 #define ALLOCNO_AVAILABLE_REGS_NUM(A) ((A)->available_regs_num)
458 #define ALLOCNO_NEXT_BUCKET_ALLOCNO(A) ((A)->next_bucket_allocno)
459 #define ALLOCNO_PREV_BUCKET_ALLOCNO(A) ((A)->prev_bucket_allocno)
460 #define ALLOCNO_TEMP(A) ((A)->temp)
461 #define ALLOCNO_FIRST_COALESCED_ALLOCNO(A) ((A)->first_coalesced_allocno)
462 #define ALLOCNO_NEXT_COALESCED_ALLOCNO(A) ((A)->next_coalesced_allocno)
463 #define ALLOCNO_LIVE_RANGES(A) ((A)->live_ranges)
464 #define ALLOCNO_MIN(A) ((A)->min)
465 #define ALLOCNO_MAX(A) ((A)->max)
466 #define ALLOCNO_CONFLICT_ID(A) ((A)->conflict_id)
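/* Usage sketch, assuming A is a valid ira_allocno_t and SPILLED_P a
   local flag: the test below is true when the allocno was given memory
   rather than a hard register.

     spilled_p = ALLOCNO_ASSIGNED_P (A) && ALLOCNO_HARD_REGNO (A) < 0;
*/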
467
468 /* Map regno -> allocnos with given regno (see comments for
469 allocno member `next_regno_allocno'). */
470 extern ira_allocno_t *ira_regno_allocno_map;
471
472 /* Array of references to all allocnos. The order number of the
473 allocno corresponds to the index in the array. Removed allocnos
474 have NULL element value. */
475 extern ira_allocno_t *ira_allocnos;
476
477 /* Size of the previous array. */
478 extern int ira_allocnos_num;
479
480 /* Map conflict id -> allocno with given conflict id (see comments for
481 allocno member `conflict_id'). */
482 extern ira_allocno_t *ira_conflict_id_allocno_map;
483
484 /* The following structure represents a copy of two allocnos. The
485 copies represent move insns or potential move insns usually because
486 of two operand insn constraints. To remove register shuffling, we
487 also create copies between an allocno which is the output of an insn
488 and an allocno becoming dead in the insn. */
489 struct ira_allocno_copy
490 {
491 /* The unique order number of the copy node starting with 0. */
492 int num;
493 /* Allocnos connected by the copy. The first allocno should have
494 smaller order number than the second one. */
495 ira_allocno_t first, second;
496 /* Execution frequency of the copy. */
497 int freq;
498 /* The move insn which is the origin of the copy. The member
499 value is NULL for a copy representing two operand insn constraints or
500 for a copy created to remove register shuffling. In the latter
501 case the copy frequency is smaller than the corresponding insn
502 execution frequency. */
503 rtx insn;
504 /* All copies with the same allocno as FIRST are linked by the two
505 following members. */
506 ira_copy_t prev_first_allocno_copy, next_first_allocno_copy;
507 /* All copies with the same allocno as SECOND are linked by the two
508 following members. */
509 ira_copy_t prev_second_allocno_copy, next_second_allocno_copy;
511 /* Region from which the given copy originated. */
511 ira_loop_tree_node_t loop_tree_node;
512 };
513
514 /* Array of references to all copies. The order number of the copy
515 corresponds to the index in the array. Removed copies have NULL
516 element value. */
517 extern ira_copy_t *ira_copies;
518
519 /* Size of the previous array. */
520 extern int ira_copies_num;
521
522 /* The following structure describes a stack slot used for spilled
523 pseudo-registers. */
524 struct ira_spilled_reg_stack_slot
525 {
526 /* pseudo-registers assigned to the stack slot. */
527 regset_head spilled_regs;
528 /* RTL representation of the stack slot. */
529 rtx mem;
530 /* Size of the stack slot. */
531 unsigned int width;
532 };
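/* Usage sketch: scanning the slot table declared below for a slot
   that already holds spilled pseudo REGNO, assuming REGNO and I are
   locals; the real slot-reuse logic lives in ira-color.c.

     struct ira_spilled_reg_stack_slot *slot = NULL;

     for (i = 0; i < ira_spilled_reg_stack_slots_num; i++)
       if (ira_spilled_reg_stack_slots[i].mem != NULL_RTX
           && REGNO_REG_SET_P (&ira_spilled_reg_stack_slots[i].spilled_regs,
                               regno))
         {
           slot = &ira_spilled_reg_stack_slots[i];
           break;
         }
*/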
533
534 /* The number of elements in the following array. */
535 extern int ira_spilled_reg_stack_slots_num;
536
537 /* The following array contains info about spilled pseudo-registers
538 stack slots used in current function so far. */
539 extern struct ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
540
541 /* Correspondingly: overall cost of the allocation, cost of the
542 allocnos assigned to hard registers, cost of the allocnos assigned
543 to memory, and cost of loads, stores and register move insns generated
544 for pseudo-register live range splitting (see ira-emit.c). */
545 extern int ira_overall_cost;
546 extern int ira_reg_cost, ira_mem_cost;
547 extern int ira_load_cost, ira_store_cost, ira_shuffle_cost;
548 extern int ira_move_loops_num, ira_additional_jumps_num;
549
550 /* Map: hard register number -> cover class it belongs to. If the
551 corresponding class is NO_REGS, the hard register is not available
552 for allocation. */
553 extern enum reg_class ira_hard_regno_cover_class[FIRST_PSEUDO_REGISTER];
554
555 /* Map: register class x machine mode -> number of hard registers of
556 given class needed to store value of given mode. If the number
557 differs for some hard registers of the register class, the size
558 will be negative. */
559 extern int ira_reg_class_nregs[N_REG_CLASSES][MAX_MACHINE_MODE];
560
561 /* Maximal value of the previous array elements. */
562 extern int ira_max_nregs;
563
564 /* The number of bits in each element of the array used to implement a bit
565 vector of allocnos, and the type of that element. We use the
566 largest integer format available on the host machine. */
567 #define IRA_INT_BITS HOST_BITS_PER_WIDE_INT
568 #define IRA_INT_TYPE HOST_WIDE_INT
569
570 /* Set, clear or test bit number I in R, a bit vector of elements with
571 minimal index and maximal index equal correspondingly to MIN and
572 MAX. */
573 #if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
574
575 #define SET_ALLOCNO_SET_BIT(R, I, MIN, MAX) __extension__ \
576 (({ int _min = (MIN), _max = (MAX), _i = (I); \
577 if (_i < _min || _i > _max) \
578 { \
579 fprintf (stderr, \
580 "\n%s: %d: error in %s: %d not in range [%d,%d]\n", \
581 __FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
582 gcc_unreachable (); \
583 } \
584 ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
585 |= ((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
586
587
588 #define CLEAR_ALLOCNO_SET_BIT(R, I, MIN, MAX) __extension__ \
589 (({ int _min = (MIN), _max = (MAX), _i = (I); \
590 if (_i < _min || _i > _max) \
591 { \
592 fprintf (stderr, \
593 "\n%s: %d: error in %s: %d not in range [%d,%d]\n", \
594 __FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
595 gcc_unreachable (); \
596 } \
597 ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
598 &= ~((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
599
600 #define TEST_ALLOCNO_SET_BIT(R, I, MIN, MAX) __extension__ \
601 (({ int _min = (MIN), _max = (MAX), _i = (I); \
602 if (_i < _min || _i > _max) \
603 { \
604 fprintf (stderr, \
605 "\n%s: %d: error in %s: %d not in range [%d,%d]\n", \
606 __FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
607 gcc_unreachable (); \
608 } \
609 ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
610 & ((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
611
612 #else
613
614 #define SET_ALLOCNO_SET_BIT(R, I, MIN, MAX) \
615 ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
616 |= ((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
617
618 #define CLEAR_ALLOCNO_SET_BIT(R, I, MIN, MAX) \
619 ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
620 &= ~((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
621
622 #define TEST_ALLOCNO_SET_BIT(R, I, MIN, MAX) \
623 ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
624 & ((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
625
626 #endif
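/* Usage sketch: building and querying such a bit vector for conflict
   ids in [MIN, MAX], assuming MIN, MAX and ID are locals; the size
   computation mirrors the one used for allocno conflict bit vectors
   elsewhere in IRA, and ira_allocate/ira_free are declared later in
   this file.

     int size = ((max - min + IRA_INT_BITS) / IRA_INT_BITS)
                * sizeof (IRA_INT_TYPE);
     IRA_INT_TYPE *vec = (IRA_INT_TYPE *) ira_allocate (size);

     memset (vec, 0, size);
     SET_ALLOCNO_SET_BIT (vec, id, min, max);
     gcc_assert (TEST_ALLOCNO_SET_BIT (vec, id, min, max));
     ira_free (vec);
*/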
627
628 /* The iterator for an allocno set implemented as an allocno bit
629 vector. */
630 typedef struct {
631
632 /* Array containing the allocno bit vector. */
633 IRA_INT_TYPE *vec;
634
635 /* The number of the current element in the vector. */
636 unsigned int word_num;
637
638 /* The number of bits in the bit vector. */
639 unsigned int nel;
640
641 /* The current bit index of the bit vector. */
642 unsigned int bit_num;
643
644 /* Index corresponding to the 1st bit of the bit vector. */
645 int start_val;
646
647 /* The word of the bit vector currently visited. */
648 unsigned IRA_INT_TYPE word;
649 } ira_allocno_set_iterator;
650
651 /* Initialize the iterator I for the allocno bit vector VEC whose
652 minimal and maximal values are MIN and MAX. */
653 static inline void
654 ira_allocno_set_iter_init (ira_allocno_set_iterator *i,
655 IRA_INT_TYPE *vec, int min, int max)
656 {
657 i->vec = vec;
658 i->word_num = 0;
659 i->nel = max < min ? 0 : max - min + 1;
660 i->start_val = min;
661 i->bit_num = 0;
662 i->word = i->nel == 0 ? 0 : vec[0];
663 }
664
665 /* Return TRUE if we have more allocnos to visit, in which case *N is
666 set to the allocno number to be visited. Otherwise, return
667 FALSE. */
668 static inline bool
669 ira_allocno_set_iter_cond (ira_allocno_set_iterator *i, int *n)
670 {
671 /* Skip words that are zeros. */
672 for (; i->word == 0; i->word = i->vec[i->word_num])
673 {
674 i->word_num++;
675 i->bit_num = i->word_num * IRA_INT_BITS;
676
677 /* If we have reached the end, break. */
678 if (i->bit_num >= i->nel)
679 return false;
680 }
681
682 /* Skip bits that are zero. */
683 for (; (i->word & 1) == 0; i->word >>= 1)
684 i->bit_num++;
685
686 *n = (int) i->bit_num + i->start_val;
687
688 return true;
689 }
690
691 /* Advance to the next allocno in the set. */
692 static inline void
693 ira_allocno_set_iter_next (ira_allocno_set_iterator *i)
694 {
695 i->word >>= 1;
696 i->bit_num++;
697 }
698
699 /* Loop over all elements of the allocno set given by bit vector VEC and
700 its minimal and maximal values MIN and MAX. In each iteration, N
701 is set to the number of the next allocno. ITER is an instance of
702 ira_allocno_set_iterator used to iterate the allocnos in the set. */
703 #define FOR_EACH_ALLOCNO_IN_SET(VEC, MIN, MAX, N, ITER) \
704 for (ira_allocno_set_iter_init (&(ITER), (VEC), (MIN), (MAX)); \
705 ira_allocno_set_iter_cond (&(ITER), &(N)); \
706 ira_allocno_set_iter_next (&(ITER)))
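/* Usage sketch: visiting every allocno whose conflict id is set in
   such a bit vector, assuming VEC, MIN and MAX describe a conflict-id
   set as above.

     int n;
     ira_allocno_set_iterator asi;

     FOR_EACH_ALLOCNO_IN_SET (vec, min, max, n, asi)
       {
         ira_allocno_t a = ira_conflict_id_allocno_map[n];

         gcc_assert (ALLOCNO_CONFLICT_ID (a) == n);
       }
*/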
707
708 /* ira.c: */
709
710 /* Map: hard regs X modes -> set of hard registers for storing value
711 of given mode starting with given hard register. */
712 extern HARD_REG_SET ira_reg_mode_hard_regset
713 [FIRST_PSEUDO_REGISTER][NUM_MACHINE_MODES];
714
715 /* Arrays analogous to macros MEMORY_MOVE_COST and
716 REGISTER_MOVE_COST. */
717 extern short ira_memory_move_cost[MAX_MACHINE_MODE][N_REG_CLASSES][2];
718 extern move_table *ira_register_move_cost[MAX_MACHINE_MODE];
719
720 /* Similar to may_move_in_cost but it is calculated in IRA instead of
721 regclass. Another difference is that we take only available hard
722 registers into account to figure out whether one register class is a
723 subset of another one. */
724 extern move_table *ira_may_move_in_cost[MAX_MACHINE_MODE];
725
726 /* Similar to may_move_out_cost but it is calculated in IRA instead of
727 regclass. Another difference is that we take only available hard
728 registers into account to figure out whether one register class is a
729 subset of another one. */
730 extern move_table *ira_may_move_out_cost[MAX_MACHINE_MODE];
731
732 /* Register class subset relation: TRUE if the first class is a subset
733 of the second one considering only hard registers available for the
734 allocation. */
735 extern int ira_class_subset_p[N_REG_CLASSES][N_REG_CLASSES];
736
737 /* Array of number of hard registers of given class which are
738 available for the allocation. The order is defined by the
739 allocation order. */
740 extern short ira_class_hard_regs[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
741
742 /* The number of elements of the above array for given register
743 class. */
744 extern int ira_class_hard_regs_num[N_REG_CLASSES];
745
746 /* Index (in ira_class_hard_regs) for given register class and hard
747 register (in general case a hard register can belong to several
748 register classes). The index is negative for hard registers
749 unavailable for the allocation. */
750 extern short ira_class_hard_reg_index[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
751
752 /* Function-specific hard registers that cannot be used for the
753 register allocation. */
754 extern HARD_REG_SET ira_no_alloc_regs;
755
756 /* Number of given class hard registers available for the register
757 allocation for given classes. */
758 extern int ira_available_class_regs[N_REG_CLASSES];
759
760 /* Array whose values are hard register sets of the hard registers
761 available for allocation of the given register class but whose
762 HARD_REGNO_MODE_OK values for the given mode are zero. */
763 extern HARD_REG_SET prohibited_class_mode_regs
764 [N_REG_CLASSES][NUM_MACHINE_MODES];
765
766 /* Array whose values are hard regset of hard registers for which
767 move of the hard register in given mode into itself is
768 prohibited. */
769 extern HARD_REG_SET ira_prohibited_mode_move_regs[NUM_MACHINE_MODES];
770
771 /* Number of cover classes. Cover classes are non-intersecting register
772 classes containing all hard registers available for the
773 allocation. */
774 extern int ira_reg_class_cover_size;
775
776 /* The array containing cover classes (see also comments for macro
777 IRA_COVER_CLASSES). Only first IRA_REG_CLASS_COVER_SIZE elements are
778 used for this. */
779 extern enum reg_class ira_reg_class_cover[N_REG_CLASSES];
780
781 /* The value is number of elements in the subsequent array. */
782 extern int ira_important_classes_num;
783
784 /* The array containing non-empty classes (including non-empty cover
785 classes) which are subclasses of cover classes. Such classes are
786 important for calculation of the hard register usage costs. */
787 extern enum reg_class ira_important_classes[N_REG_CLASSES];
788
789 /* The array containing indexes of important classes in the previous
790 array. The array elements are defined only for important
791 classes. */
792 extern int ira_important_class_nums[N_REG_CLASSES];
793
794 /* Map of all register classes to corresponding cover class containing
795 the given class. If given class is not a subset of a cover class,
796 we translate it into the cheapest cover class. */
797 extern enum reg_class ira_class_translate[N_REG_CLASSES];
798
799 /* The biggest important class inside the intersection of the two
800 classes (calculated taking only hard registers available for
801 allocation into account). If both classes contain no hard
802 registers available for allocation, the value is calculated
803 taking all hard registers, including fixed ones, into account. */
804 extern enum reg_class ira_reg_class_intersect[N_REG_CLASSES][N_REG_CLASSES];
805
806 /* The biggest important class inside the union of the two classes
807 (calculated taking only hard registers available for
808 allocation into account). If both classes contain no hard
809 registers available for allocation, the value is calculated
810 taking all hard registers, including fixed ones, into account. In
811 other words, the value is the corresponding reg_class_subunion
812 value. */
813 extern enum reg_class ira_reg_class_union[N_REG_CLASSES][N_REG_CLASSES];
814
815 extern void *ira_allocate (size_t);
816 extern void *ira_reallocate (void *, size_t);
817 extern void ira_free (void *addr);
818 extern bitmap ira_allocate_bitmap (void);
819 extern void ira_free_bitmap (bitmap);
820 extern void ira_print_disposition (FILE *);
821 extern void ira_debug_disposition (void);
822 extern void ira_debug_class_cover (void);
823 extern void ira_init_register_move_cost (enum machine_mode);
824
825 /* The length of the two following arrays. */
826 extern int ira_reg_equiv_len;
827
828 /* The element value is TRUE if the corresponding regno value is
829 invariant. */
830 extern bool *ira_reg_equiv_invariant_p;
831
832 /* The element value is equiv constant of given pseudo-register or
833 NULL_RTX. */
834 extern rtx *ira_reg_equiv_const;
835
836 /* ira-build.c */
837
838 /* The current loop tree node and its regno allocno map. */
839 extern ira_loop_tree_node_t ira_curr_loop_tree_node;
840 extern ira_allocno_t *ira_curr_regno_allocno_map;
841
842 extern void ira_debug_copy (ira_copy_t);
843 extern void ira_debug_copies (void);
844 extern void ira_debug_allocno_copies (ira_allocno_t);
845
846 extern void ira_traverse_loop_tree (bool, ira_loop_tree_node_t,
847 void (*) (ira_loop_tree_node_t),
848 void (*) (ira_loop_tree_node_t));
849 extern ira_allocno_t ira_create_allocno (int, bool, ira_loop_tree_node_t);
850 extern void ira_set_allocno_cover_class (ira_allocno_t, enum reg_class);
851 extern bool ira_conflict_vector_profitable_p (ira_allocno_t, int);
852 extern void ira_allocate_allocno_conflict_vec (ira_allocno_t, int);
853 extern void ira_allocate_allocno_conflicts (ira_allocno_t, int);
854 extern void ira_add_allocno_conflict (ira_allocno_t, ira_allocno_t);
855 extern void ira_print_expanded_allocno (ira_allocno_t);
856 extern allocno_live_range_t ira_create_allocno_live_range
857 (ira_allocno_t, int, int, allocno_live_range_t);
858 extern void ira_finish_allocno_live_range (allocno_live_range_t);
859 extern void ira_free_allocno_updated_costs (ira_allocno_t);
860 extern ira_copy_t ira_create_copy (ira_allocno_t, ira_allocno_t,
861 int, rtx, ira_loop_tree_node_t);
862 extern void ira_add_allocno_copy_to_list (ira_copy_t);
863 extern void ira_swap_allocno_copy_ends_if_necessary (ira_copy_t);
864 extern void ira_remove_allocno_copy_from_list (ira_copy_t);
865 extern ira_copy_t ira_add_allocno_copy (ira_allocno_t, ira_allocno_t, int, rtx,
866 ira_loop_tree_node_t);
867
868 extern int *ira_allocate_cost_vector (enum reg_class);
869 extern void ira_free_cost_vector (int *, enum reg_class);
870
871 extern void ira_flattening (int, int);
872 extern bool ira_build (bool);
873 extern void ira_destroy (void);
874
875 /* ira-costs.c */
876 extern void ira_init_costs_once (void);
877 extern void ira_init_costs (void);
878 extern void ira_finish_costs_once (void);
879 extern void ira_costs (void);
880 extern void ira_tune_allocno_costs_and_cover_classes (void);
881
882 /* ira-lives.c */
883
884 extern void ira_rebuild_start_finish_chains (void);
885 extern void ira_print_live_range_list (FILE *, allocno_live_range_t);
886 extern void ira_debug_live_range_list (allocno_live_range_t);
887 extern void ira_debug_allocno_live_ranges (ira_allocno_t);
888 extern void ira_debug_live_ranges (void);
889 extern void ira_create_allocno_live_ranges (void);
890 extern void ira_compress_allocno_live_ranges (void);
891 extern void ira_finish_allocno_live_ranges (void);
892
893 /* ira-conflicts.c */
894 extern bool ira_allocno_live_ranges_intersect_p (ira_allocno_t, ira_allocno_t);
895 extern bool ira_pseudo_live_ranges_intersect_p (int, int);
896 extern void ira_debug_conflicts (bool);
897 extern void ira_build_conflicts (void);
898
899 /* ira-color.c */
900 extern int ira_loop_edge_freq (ira_loop_tree_node_t, int, bool);
901 extern void ira_reassign_conflict_allocnos (int);
902 extern void ira_initiate_assign (void);
903 extern void ira_finish_assign (void);
904 extern void ira_color (void);
905 extern void ira_fast_allocation (void);
906
907 /* ira-emit.c */
908 extern void ira_emit (bool);
909
910 \f
911
912 /* The iterator for all allocnos. */
913 typedef struct {
914 /* The number of the current element in IRA_ALLOCNOS. */
915 int n;
916 } ira_allocno_iterator;
917
918 /* Initialize the iterator I. */
919 static inline void
920 ira_allocno_iter_init (ira_allocno_iterator *i)
921 {
922 i->n = 0;
923 }
924
925 /* Return TRUE if we have more allocnos to visit, in which case *A is
926 set to the allocno to be visited. Otherwise, return FALSE. */
927 static inline bool
928 ira_allocno_iter_cond (ira_allocno_iterator *i, ira_allocno_t *a)
929 {
930 int n;
931
932 for (n = i->n; n < ira_allocnos_num; n++)
933 if (ira_allocnos[n] != NULL)
934 {
935 *a = ira_allocnos[n];
936 i->n = n + 1;
937 return true;
938 }
939 return false;
940 }
941
942 /* Loop over all allocnos. In each iteration, A is set to the next
943 allocno. ITER is an instance of ira_allocno_iterator used to iterate
944 the allocnos. */
945 #define FOR_EACH_ALLOCNO(A, ITER) \
946 for (ira_allocno_iter_init (&(ITER)); \
947 ira_allocno_iter_cond (&(ITER), &(A));)
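/* Usage sketch: counting the allocnos that ended up in memory.

     ira_allocno_t a;
     ira_allocno_iterator ai;
     int spilled_num = 0;

     FOR_EACH_ALLOCNO (a, ai)
       if (ALLOCNO_HARD_REGNO (a) < 0)
         spilled_num++;
*/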
948
949
950 \f
951
952 /* The iterator for copies. */
953 typedef struct {
954 /* The number of the current element in IRA_COPIES. */
955 int n;
956 } ira_copy_iterator;
957
958 /* Initialize the iterator I. */
959 static inline void
960 ira_copy_iter_init (ira_copy_iterator *i)
961 {
962 i->n = 0;
963 }
964
965 /* Return TRUE if we have more copies to visit, in which case *CP is
966 set to the copy to be visited. Otherwise, return FALSE. */
967 static inline bool
968 ira_copy_iter_cond (ira_copy_iterator *i, ira_copy_t *cp)
969 {
970 int n;
971
972 for (n = i->n; n < ira_copies_num; n++)
973 if (ira_copies[n] != NULL)
974 {
975 *cp = ira_copies[n];
976 i->n = n + 1;
977 return true;
978 }
979 return false;
980 }
981
982 /* Loop over all copies. In each iteration, C is set to the next
983 copy. ITER is an instance of ira_copy_iterator used to iterate
984 the copies. */
985 #define FOR_EACH_COPY(C, ITER) \
986 for (ira_copy_iter_init (&(ITER)); \
987 ira_copy_iter_cond (&(ITER), &(C));)
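/* Usage sketch: dumping every copy, assuming ira_dump_file is
   non-NULL.

     ira_copy_t cp;
     ira_copy_iterator ci;

     FOR_EACH_COPY (cp, ci)
       fprintf (ira_dump_file, "  cp%d: a%d-a%d (freq %d)\n",
                cp->num, ALLOCNO_NUM (cp->first),
                ALLOCNO_NUM (cp->second), cp->freq);
*/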
988
989
990 \f
991
992 /* The iterator for allocno conflicts. */
993 typedef struct {
994
995 /* TRUE if the conflicts are represented by vector of allocnos. */
996 bool allocno_conflict_vec_p;
997
998 /* The conflict vector or conflict bit vector. */
999 void *vec;
1000
1001 /* The number of the current element in the vector (of type
1002 ira_allocno_t or IRA_INT_TYPE). */
1003 unsigned int word_num;
1004
1005 /* The bit vector size. It is defined only if
1006 ALLOCNO_CONFLICT_VEC_P is FALSE. */
1007 unsigned int size;
1008
1009 /* The current bit index of bit vector. It is defined only if
1010 ALLOCNO_CONFLICT_VEC_P is FALSE. */
1011 unsigned int bit_num;
1012
1013 /* Allocno conflict id corresponding to the 1st bit of the bit
1014 vector. It is defined only if ALLOCNO_CONFLICT_VEC_P is
1015 FALSE. */
1016 int base_conflict_id;
1017
1018 /* The word of bit vector currently visited. It is defined only if
1019 ALLOCNO_CONFLICT_VEC_P is FALSE. */
1020 unsigned IRA_INT_TYPE word;
1021 } ira_allocno_conflict_iterator;
1022
1023 /* Initialize the iterator I with ALLOCNO conflicts. */
1024 static inline void
1025 ira_allocno_conflict_iter_init (ira_allocno_conflict_iterator *i,
1026 ira_allocno_t allocno)
1027 {
1028 i->allocno_conflict_vec_p = ALLOCNO_CONFLICT_VEC_P (allocno);
1029 i->vec = ALLOCNO_CONFLICT_ALLOCNO_ARRAY (allocno);
1030 i->word_num = 0;
1031 if (i->allocno_conflict_vec_p)
1032 i->size = i->bit_num = i->base_conflict_id = i->word = 0;
1033 else
1034 {
1035 if (ALLOCNO_MIN (allocno) > ALLOCNO_MAX (allocno))
1036 i->size = 0;
1037 else
1038 i->size = ((ALLOCNO_MAX (allocno) - ALLOCNO_MIN (allocno)
1039 + IRA_INT_BITS)
1040 / IRA_INT_BITS) * sizeof (IRA_INT_TYPE);
1041 i->bit_num = 0;
1042 i->base_conflict_id = ALLOCNO_MIN (allocno);
1043 i->word = (i->size == 0 ? 0 : ((IRA_INT_TYPE *) i->vec)[0]);
1044 }
1045 }
1046
1047 /* Return TRUE if we have more conflicting allocnos to visit, in which
1048 case *A is set to the allocno to be visited. Otherwise, return
1049 FALSE. */
1050 static inline bool
1051 ira_allocno_conflict_iter_cond (ira_allocno_conflict_iterator *i,
1052 ira_allocno_t *a)
1053 {
1054 ira_allocno_t conflict_allocno;
1055
1056 if (i->allocno_conflict_vec_p)
1057 {
1058 conflict_allocno = ((ira_allocno_t *) i->vec)[i->word_num];
1059 if (conflict_allocno == NULL)
1060 return false;
1061 *a = conflict_allocno;
1062 return true;
1063 }
1064 else
1065 {
1066 /* Skip words that are zeros. */
1067 for (; i->word == 0; i->word = ((IRA_INT_TYPE *) i->vec)[i->word_num])
1068 {
1069 i->word_num++;
1070
1071 /* If we have reached the end, break. */
1072 if (i->word_num * sizeof (IRA_INT_TYPE) >= i->size)
1073 return false;
1074
1075 i->bit_num = i->word_num * IRA_INT_BITS;
1076 }
1077
1078 /* Skip bits that are zero. */
1079 for (; (i->word & 1) == 0; i->word >>= 1)
1080 i->bit_num++;
1081
1082 *a = ira_conflict_id_allocno_map[i->bit_num + i->base_conflict_id];
1083
1084 return true;
1085 }
1086 }
1087
1088 /* Advance to the next conflicting allocno. */
1089 static inline void
1090 ira_allocno_conflict_iter_next (ira_allocno_conflict_iterator *i)
1091 {
1092 if (i->allocno_conflict_vec_p)
1093 i->word_num++;
1094 else
1095 {
1096 i->word >>= 1;
1097 i->bit_num++;
1098 }
1099 }
1100
1101 /* Loop over all allocnos conflicting with ALLOCNO. In each
1102 iteration, A is set to the next conflicting allocno. ITER is an
1103 instance of ira_allocno_conflict_iterator used to iterate the
1104 conflicts. */
1105 #define FOR_EACH_ALLOCNO_CONFLICT(ALLOCNO, A, ITER) \
1106 for (ira_allocno_conflict_iter_init (&(ITER), (ALLOCNO)); \
1107 ira_allocno_conflict_iter_cond (&(ITER), &(A)); \
1108 ira_allocno_conflict_iter_next (&(ITER)))
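/* Usage sketch: accumulating the hard registers already taken by
   assigned allocnos conflicting with allocno A, assuming A is an
   ira_allocno_t, CONFLICT_REGS a local HARD_REG_SET, and the usual
   hard-reg-set.h macros.

     ira_allocno_t conflict_a;
     ira_allocno_conflict_iterator aci;

     CLEAR_HARD_REG_SET (conflict_regs);
     FOR_EACH_ALLOCNO_CONFLICT (a, conflict_a, aci)
       if (ALLOCNO_ASSIGNED_P (conflict_a)
           && ALLOCNO_HARD_REGNO (conflict_a) >= 0)
         SET_HARD_REG_BIT (conflict_regs, ALLOCNO_HARD_REGNO (conflict_a));
*/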
1109
1110 \f
1111
1112 /* The function returns TRUE if hard registers starting with
1113 HARD_REGNO and needed to hold a value of MODE are not in the set
1114 HARD_REGSET. */
1115 static inline bool
1116 ira_hard_reg_not_in_set_p (int hard_regno, enum machine_mode mode,
1117 HARD_REG_SET hard_regset)
1118 {
1119 int i;
1120
1121 ira_assert (hard_regno >= 0);
1122 for (i = hard_regno_nregs[hard_regno][mode] - 1; i >= 0; i--)
1123 if (TEST_HARD_REG_BIT (hard_regset, hard_regno + i))
1124 return false;
1125 return true;
1126 }
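/* Usage sketch: checking whether candidate register HARD_REGNO is
   free of A's accumulated conflicts, assuming A is an allocno,
   HARD_REGNO a hard register of its cover class, and OK_P a local
   flag.

     ok_p = ira_hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
                                       ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
*/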
1127
1128 \f
1129
1130 /* To save memory we use a lazy approach for allocation and
1131 initialization of the cost vectors. We do this only when it is
1132 really necessary. */
1133
1134 /* Allocate cost vector *VEC for hard registers of COVER_CLASS and
1135 initialize the elements by VAL if it is necessary. */
1136 static inline void
1137 ira_allocate_and_set_costs (int **vec, enum reg_class cover_class, int val)
1138 {
1139 int i, *reg_costs;
1140 int len;
1141
1142 if (*vec != NULL)
1143 return;
1144 *vec = reg_costs = ira_allocate_cost_vector (cover_class);
1145 len = ira_class_hard_regs_num[cover_class];
1146 for (i = 0; i < len; i++)
1147 reg_costs[i] = val;
1148 }
1149
1150 /* Allocate cost vector *VEC for hard registers of COVER_CLASS and
1151 copy values of vector SRC into the vector if it is necessary. */
1152 static inline void
1153 ira_allocate_and_copy_costs (int **vec, enum reg_class cover_class, int *src)
1154 {
1155 int len;
1156
1157 if (*vec != NULL || src == NULL)
1158 return;
1159 *vec = ira_allocate_cost_vector (cover_class);
1160 len = ira_class_hard_regs_num[cover_class];
1161 memcpy (*vec, src, sizeof (int) * len);
1162 }
1163
1164 /* Allocate cost vector *VEC for hard registers of COVER_CLASS and
1165 add values of vector SRC into the vector if it is necessary. */
1166 static inline void
1167 ira_allocate_and_accumulate_costs (int **vec, enum reg_class cover_class,
1168 int *src)
1169 {
1170 int i, len;
1171
1172 if (src == NULL)
1173 return;
1174 len = ira_class_hard_regs_num[cover_class];
1175 if (*vec == NULL)
1176 {
1177 *vec = ira_allocate_cost_vector (cover_class);
1178 memset (*vec, 0, sizeof (int) * len);
1179 }
1180 for (i = 0; i < len; i++)
1181 (*vec)[i] += src[i];
1182 }
1183
1184 /* Allocate cost vector *VEC for hard registers of COVER_CLASS and
1185 copy values of vector SRC into the vector or initialize it by VAL
1186 (if SRC is null). */
1187 static inline void
1188 ira_allocate_and_set_or_copy_costs (int **vec, enum reg_class cover_class,
1189 int val, int *src)
1190 {
1191 int i, *reg_costs;
1192 int len;
1193
1194 if (*vec != NULL)
1195 return;
1196 *vec = reg_costs = ira_allocate_cost_vector (cover_class);
1197 len = ira_class_hard_regs_num[cover_class];
1198 if (src != NULL)
1199 memcpy (reg_costs, src, sizeof (int) * len);
1200 else
1201 {
1202 for (i = 0; i < len; i++)
1203 reg_costs[i] = val;
1204 }
1205 }
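/* Usage sketch: the lazy-allocation pattern as it is typically used
   during coloring, assuming A is an ira_allocno_t (see ira-color.c for
   the real uses).

     ira_allocate_and_set_or_copy_costs
       (&ALLOCNO_UPDATED_HARD_REG_COSTS (a), ALLOCNO_COVER_CLASS (a),
        ALLOCNO_COVER_CLASS_COST (a), ALLOCNO_HARD_REG_COSTS (a));
*/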