/* Integrated Register Allocator (IRA) entry point.
   Copyright (C) 2006-2019 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* The integrated register allocator (IRA) is a regional register
   allocator performing graph coloring on a top-down traversal of
   nested regions.  Graph coloring in a region is based on the
   Chaitin-Briggs algorithm.  It is called integrated because
   register coalescing, register live range splitting, and choosing a
   better hard register are done on the fly during coloring.
   Register coalescing and choosing a cheaper hard register are done
   by hard register preferencing during hard register assignment.
   The live range splitting is a byproduct of the regional register
   allocation.

   Major IRA notions are:

     o *Region* is a part of the CFG where graph coloring based on
       the Chaitin-Briggs algorithm is done.  IRA can work on any set
       of nested CFG regions forming a tree.  Currently the regions
       are the entire function for the root region and natural loops
       for the other regions.  Therefore the data structure
       representing a region is called loop_tree_node.

     o *Allocno class* is a register class used for allocation of a
       given allocno.  Only hard registers of the given register
       class can be assigned to the given allocno.  In reality, an
       even smaller subset of (*profitable*) hard registers can be
       assigned.  In rare cases the subset can be smaller still,
       because our modification of the Chaitin-Briggs algorithm
       requires that the sets of hard registers assignable to
       allocnos form a forest, i.e. that the sets can be ordered so
       that any previous set either does not intersect a given set or
       is a superset of it.  A worked example follows this paragraph.
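
       For instance (an illustrative i386-flavored sketch, not data
       from a real compilation), the profitable sets

         {EAX}   {EAX, EDX}   {EAX, EDX, ECX, EBX}   {ESI, EDI}

       form a forest: the first three are a chain of subsets and the
       fourth intersects none of them.  A set {EDX, ECX} could not be
       added, because it intersects {EAX, EDX} while being neither
       its subset nor its superset.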

     o *Pressure class* is a register class belonging to a set of
       register classes containing all of the hard registers
       available for register allocation.  The set of all pressure
       classes for a target is defined in the corresponding
       machine-description file according to some criteria.  Register
       pressure is calculated only for pressure classes and it
       affects some IRA decisions, such as forming allocation
       regions.

     o *Allocno* represents the live range of a pseudo-register in a
       region.  Besides the obvious attributes like the corresponding
       pseudo-register number, allocno class, conflicting allocnos
       and conflicting hard registers, there are a few allocno
       attributes which are important for understanding the
       allocation algorithm:

       - *Live ranges*.  This is a list of ranges of *program points*
         where the allocno lives.  Program points represent places
         where a pseudo can be born or become dead (there are
         approximately two times more program points than insns) and
         they are represented by integers starting with 0.  The live
         ranges are used to find conflicts between allocnos.  They
         also play a very important role for the transformation of
         the IRA internal representation of several regions into a
         one-region representation.  The latter is used during the
         reload pass work because each allocno represents all of the
         corresponding pseudo-registers.  A conflict-test sketch is
         given below.
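
         As a minimal sketch (hypothetical types, not IRA's internal
         API), two lists of [start, end] program-point ranges, each
         ordered by start point, conflict exactly when some pair of
         their ranges overlaps:

           struct range { int start, end; struct range *next; };

           static bool
           ranges_conflict_p (struct range *r1, struct range *r2)
           {
             while (r1 != NULL && r2 != NULL)
               {
                 if (r1->end < r2->start)
                   r1 = r1->next;
                 else if (r2->end < r1->start)
                   r2 = r2->next;
                 else
                   return true;
               }
             return false;
           }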

       - *Hard-register costs*.  This is a vector of size equal to
         the number of available hard registers of the allocno class.
         The cost of a callee-clobbered hard register for an allocno
         is increased by the cost of save/restore code around the
         calls through the given allocno's life.  If the allocno is a
         move instruction operand and another operand is a hard
         register of the allocno class, the cost of the hard register
         is decreased by the move cost.

         When an allocno is assigned, the hard register with minimal
         full cost is used.  Initially, a hard register's full cost
         is the corresponding value from the hard register's cost
         vector.  If the allocno is connected by a *copy* (see below)
         to another allocno which has just received a hard register,
         the cost of the hard register is decreased.  Before choosing
         a hard register for an allocno, the allocno's current costs
         of the hard registers are modified by the conflict
         hard-register costs of all of the conflicting allocnos which
         are not assigned yet.  A selection sketch is given below.
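
         As a minimal sketch (hypothetical names, not IRA's internal
         API), choosing a hard register then reduces to minimizing
         the sum of the two cost vectors:

           static int
           best_hard_reg (int n, const int *cost,
                          const int *conflict_cost)
           {
             int best = -1, best_cost = INT_MAX;
             for (int i = 0; i < n; i++)
               if (cost[i] + conflict_cost[i] < best_cost)
                 {
                   best = i;
                   best_cost = cost[i] + conflict_cost[i];
                 }
             return best;
           }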

       - *Conflict hard-register costs*.  This is a vector of the
         same size as the hard-register costs vector.  To permit an
         unassigned allocno to get a better hard register, IRA uses
         this vector to calculate the final full cost of the
         available hard registers.  Conflict hard-register costs of
         an unassigned allocno are also changed with a change of the
         hard-register cost of the allocno when a copy involving the
         allocno is processed as described above.  This is done to
         show other unassigned allocnos that a given allocno prefers
         some hard registers in order to remove the move instruction
         corresponding to the copy.

     o *Cap*.  If a pseudo-register does not live in a region but
       lives in a nested region, IRA creates a special allocno called
       a cap in the outer region.  A region cap is also created for a
       subregion cap.

     o *Copy*.  Allocnos can be connected by copies.  Copies are used
       to modify hard-register costs for allocnos during coloring.
       Such modifications reflect a preference to use the same hard
       register for the allocnos connected by copies.  Usually copies
       are created for move insns (in this case it results in
       register coalescing).  But IRA also creates copies for
       operands of an insn which should be assigned to the same hard
       register due to constraints in the machine description (it
       usually results in removing a move generated in reload to
       satisfy the constraints), and copies referring to the allocno
       which is the output operand of an instruction and the allocno
       which is an input operand dying in the instruction (creation
       of such copies results in less register shuffling).  IRA *does
       not* create copies between allocnos for the same register from
       different regions because we use another technique for
       propagating hard-register preferences on the borders of
       regions.  (An illustrative move-insn sketch follows.)
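
       For instance, for a move insn

         (set (reg:SI 100) (reg:SI 101))

       IRA connects the allocnos of pseudos 100 and 101 by a copy, so
       whichever of the two is colored first biases the other toward
       the same hard register, and the move can often be removed.
       (The RTL above is only an illustrative sketch.)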

   Allocnos (including caps) for the upper region in the region tree
   *accumulate* information important for coloring from allocnos with
   the same pseudo-register from nested regions.  This includes
   hard-register and memory costs, conflicts with hard registers,
   allocno conflicts, allocno copies and more.  *Thus, attributes for
   allocnos in a region have the same values as if the region had no
   subregions*.  It means that attributes for allocnos in the
   outermost region corresponding to the function have the same
   values as though the allocation used only one region which is the
   entire function.  It also means that we can look at IRA's work as
   if it first did the allocation for the whole function and then
   improved it for loops, then for their subloops, and so on.  A
   small accumulation sketch follows.
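
   As a minimal sketch (the type and field names are hypothetical,
   not IRA's real accumulation code), accumulating the costs of a
   subregion allocno CHILD into the parent-region allocno PARENT for
   the same pseudo might look like:

     struct allocno { int *hard_reg_cost; int memory_cost; };

     static void
     accumulate_costs (struct allocno *parent,
                       const struct allocno *child, int n_hard_regs)
     {
       for (int i = 0; i < n_hard_regs; i++)
         parent->hard_reg_cost[i] += child->hard_reg_cost[i];
       parent->memory_cost += child->memory_cost;
     }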

   IRA's major passes are:

     o Building the IRA internal representation, which consists of
       the following subpasses:

       * First, IRA builds regions and creates allocnos (file
         ira-build.c) and initializes most of their attributes.

       * Then IRA finds an allocno class for each allocno and
         calculates its initial (non-accumulated) cost of memory and
         of each hard register of its allocno class (file
         ira-costs.c).

       * IRA creates live ranges of each allocno, calculates register
         pressure for each pressure class in each region, and sets up
         conflict hard registers for each allocno and info about
         calls the allocno lives through (file ira-lives.c).

       * IRA removes low register pressure loops from the regions,
         mostly to speed IRA up (file ira-build.c).

       * IRA propagates accumulated allocno info from lower region
         allocnos to corresponding upper region allocnos (file
         ira-build.c).

       * IRA creates all caps (file ira-build.c).

       * Having live ranges of allocnos and their classes, IRA
         creates conflicting allocnos for each allocno.  Conflicting
         allocnos are stored as a bit vector or an array of pointers
         to the conflicting allocnos, whichever is more profitable
         (file ira-conflicts.c).  At this point IRA creates allocno
         copies.

     o Coloring.  Now IRA has all the necessary info to start the
       graph coloring process.  It is done in each region on a
       top-down traversal of the region tree (file ira-color.c).
       There are the following subpasses:

       * Finding profitable hard registers of the corresponding
         allocno class for each allocno.  For example, only
         callee-saved hard registers are frequently profitable for
         allocnos living through calls.  If the profitable hard
         register set of an allocno does not form a tree based on the
         subset relation, we use some approximation to form the tree.
         This approximation is used to figure out the trivial
         colorability of allocnos.  The approximation is a pretty
         rare case.

       * Putting allocnos onto the coloring stack.  IRA uses Briggs
         optimistic coloring, which is a major improvement over
         Chaitin's coloring.  Therefore IRA does not spill allocnos
         at this point.  There is some freedom in the order of
         putting allocnos on the stack which can affect the final
         result of the allocation.  IRA uses some heuristics to
         improve the order.  The major one is to form *threads* from
         colorable allocnos and push them onto the stack by threads.
         A thread is a set of non-conflicting colorable allocnos
         connected by copies.  The thread contains allocnos from the
         colorable bucket or colorable allocnos already pushed onto
         the coloring stack.  Pushing thread allocnos one after
         another onto the stack increases the chances of removing
         copies when the allocnos get the same hard reg.

         We also use a modification of the Chaitin-Briggs algorithm
         which works for intersecting register classes of allocnos.
         To figure out the trivial colorability of allocnos, the
         above-mentioned tree of hard register sets is used.  To get
         an idea of how the algorithm works, consider an i386
         example: an allocno to which any general hard register can
         be assigned.  If the allocno conflicts with eight allocnos
         to which only the EAX register can be assigned, the given
         allocno is still trivially colorable because all the
         conflicting allocnos can only be assigned EAX and all the
         other general hard registers are still free.  A sketch of
         the test appears below.
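
         As a minimal sketch of the idea (an approximation only; the
         real test walks the forest of hard register sets), group the
         conflicting allocnos by their profitable set and cap each
         group's demand at the size of its set:

           static bool
           trivially_colorable_p (int available, int n_groups,
                                  const int *group_set_size,
                                  const int *group_demand)
           {
             int taken = 0;
             for (int g = 0; g < n_groups; g++)
               taken += (group_demand[g] < group_set_size[g]
                         ? group_demand[g] : group_set_size[g]);
             return taken < available;
           }

         In the example above, the single {EAX} group takes at most
         one register however many allocnos it contains, so the
         allocno stays trivially colorable.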

         To get an idea of the trivial colorability criterion used,
         it is also useful to read the article "Graph-Coloring
         Register Allocation for Irregular Architectures" by Michael
         D. Smith and Glenn Holloway.  The major difference between
         the article's approach and the approach used in IRA is that
         Smith's approach takes register classes only from the
         machine description, while IRA also calculates register
         classes from the intermediate code (e.g. an explicit usage
         of hard registers in RTL code for parameter passing can
         result in the creation of additional register classes which
         contain or exclude those hard registers).  That makes the
         IRA approach useful for improving coloring even for
         architectures with regular register files, and in fact some
         benchmarking shows that the improvement for regular-class
         architectures is even bigger than for irregular ones.
         Another difference is that Smith's approach chooses the
         intersection of the classes of all insn operands in which a
         given pseudo occurs.  IRA can use bigger classes if doing so
         is still more profitable than memory usage.

       * Popping the allocnos from the stack and assigning them hard
         registers.  If IRA cannot assign a hard register to an
         allocno and the allocno is coalesced, IRA undoes the
         coalescing and puts the uncoalesced allocnos onto the stack
         in the hope that some such allocnos will get a hard register
         separately.  If IRA fails to assign a hard register, or
         memory is more profitable for the allocno, IRA spills the
         allocno.  IRA assigns the allocno the hard register with
         minimal full allocation cost, which reflects the cost of
         usage of the hard register for the allocno and the cost of
         usage of the hard register for the allocnos conflicting with
         the given allocno.

       * Chaitin-Briggs coloring assigns as many pseudos as possible
         to hard registers.  After coloring we try to improve the
         allocation from the cost point of view.  We improve the
         allocation by spilling some allocnos and assigning the freed
         hard registers to other allocnos if it decreases the overall
         allocation cost.

       * After allocno assignment in the region, IRA modifies the
         hard register and memory costs for the corresponding
         allocnos in the subregions to reflect the cost of possible
         loads, stores, or moves on the border of the region and its
         subregions.  When the default regional allocation algorithm
         is used (-fira-algorithm=mixed), IRA just propagates the
         assignment for allocnos if the register pressure in the
         region for the corresponding pressure class is less than the
         number of available hard registers for the given pressure
         class.

     o Spill/restore code moving.  When IRA performs an allocation by
       traversing regions in top-down order, it does not know what
       happens below in the region tree.  Therefore, sometimes IRA
       misses opportunities to perform a better allocation.  A simple
       optimization tries to improve allocation in a region that has
       subregions and is itself contained in another region.  If the
       corresponding allocnos in the subregion are spilled, it spills
       the region allocno if it is profitable.  The optimization
       implements a simple iterative algorithm performing profitable
       transformations while they are still possible.  It is fast in
       practice, so there is no real need for a better time
       complexity algorithm.

     o Code change.  After coloring, two allocnos representing the
       same pseudo-register outside and inside a region respectively
       may be assigned to different locations (hard registers or
       memory).  In this case IRA creates and uses a new
       pseudo-register inside the region and adds code to move
       allocno values on the region's borders.  This is done during a
       top-down traversal of the regions (file ira-emit.c).  In some
       complicated cases IRA can create a new allocno to move allocno
       values (e.g. when a swap of values stored in two hard
       registers is needed).  At this stage, the new allocno is
       marked as spilled.  IRA still creates the pseudo-register and
       the moves on the region borders even when both allocnos were
       assigned to the same hard register.  If the reload pass spills
       a pseudo-register for some reason, the effect will be smaller
       because another allocno will still be in the hard register.
       In most cases, this is better than spilling both allocnos.  If
       reload does not change the allocation for the two
       pseudo-registers, the trivial move will be removed by
       post-reload optimizations.  IRA does not generate moves for
       allocnos assigned to the same hard register when the default
       regional allocation algorithm is used and the register
       pressure in the region for the corresponding pressure class is
       less than the number of available hard registers for the given
       pressure class.  IRA also does some optimizations to remove
       redundant stores and to reduce code duplication on the region
       borders.

     o Flattening internal representation.  After changing code, IRA
       transforms its internal representation for several regions
       into a one-region representation (file ira-build.c).  This
       process is called IR flattening.  Such a process is more
       complicated than IR rebuilding would be, but is much faster.

     o After IR flattening, IRA tries to assign hard registers to all
       spilled allocnos.  This is implemented by a simple and fast
       priority coloring algorithm (see function
       ira_reassign_conflict_allocnos in ira-color.c).  Here new
       allocnos created during the code change pass can be assigned
       to hard registers.

     o At the end IRA calls the reload pass.  The reload pass
       communicates with IRA through several functions in file
       ira-color.c to improve its decisions in

       * sharing stack slots for the spilled pseudos based on IRA
         info about pseudo-register conflicts.

       * reassigning hard registers to all spilled pseudos at the end
         of each reload iteration.

       * choosing a better hard register to spill, based on IRA info
         about pseudo-register live ranges and the register pressure
         in places where the pseudo-register lives.

   IRA uses a lot of data representing the target processors.  These
   data are initialized in file ira.c.

   If the function has no loops (or the loops are ignored when
   -fira-algorithm=CB is used), we have classic Chaitin-Briggs
   coloring (only instead of a separate coalescing pass we use hard
   register preferencing).  In such a case IRA works much faster
   because many things are not done (like IR flattening, the
   spill/restore optimization, and the code change).

   Literature worth reading for a better understanding of the code:

     o Preston Briggs, Keith D. Cooper, Linda Torczon.  Improvements
       to Graph Coloring Register Allocation.

     o David Callahan, Brian Koblenz.  Register allocation via
       hierarchical graph coloring.

     o Keith Cooper, Anshuman Dasgupta, Jason Eckhardt.  Revisiting
       Graph Coloring Register Allocation: A Study of the
       Chaitin-Briggs and Callahan-Koblenz Algorithms.

     o Guei-Yuan Lueh, Thomas Gross, and Ali-Reza Adl-Tabatabai.
       Global Register Allocation Based on Graph Fusion.

     o Michael D. Smith and Glenn Holloway.  Graph-Coloring Register
       Allocation for Irregular Architectures.

     o Vladimir Makarov.  The Integrated Register Allocator for GCC.

     o Vladimir Makarov.  The top-down register allocator for
       irregular register file architectures.

*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "ira-int.h"
#include "diagnostic-core.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "expr.h"
#include "tree-pass.h"
#include "output.h"
#include "reload.h"
#include "cfgloop.h"
#include "lra.h"
#include "dce.h"
#include "dbgcnt.h"
#include "rtl-iter.h"
#include "shrink-wrap.h"
#include "print-rtl.h"

struct target_ira default_target_ira;
struct target_ira_int default_target_ira_int;
#if SWITCHABLE_TARGET
struct target_ira *this_target_ira = &default_target_ira;
struct target_ira_int *this_target_ira_int = &default_target_ira_int;
#endif

/* A modified value of flag `-fira-verbose' used internally.  */
int internal_flag_ira_verbose;

/* Dump file of the allocator if it is not NULL.  */
FILE *ira_dump_file;

/* The number of elements in the following array.  */
int ira_spilled_reg_stack_slots_num;

/* The following array contains info about spilled pseudo-registers'
   stack slots used in the current function so far.  */
struct ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;

/* Correspondingly, overall cost of the allocation, overall cost
   before reload, cost of the allocnos assigned to hard registers,
   cost of the allocnos assigned to memory, and cost of loads, stores
   and register move insns generated for pseudo-register live range
   splitting (see ira-emit.c).  */
int64_t ira_overall_cost, overall_cost_before;
int64_t ira_reg_cost, ira_mem_cost;
int64_t ira_load_cost, ira_store_cost, ira_shuffle_cost;
int ira_move_loops_num, ira_additional_jumps_num;

/* All registers that can be eliminated.  */

HARD_REG_SET eliminable_regset;

/* Value of max_reg_num () before IRA work starts.  This value helps
   us to recognize a situation when new pseudos were created during
   IRA work.  */
static int max_regno_before_ira;

/* Temporary hard reg set used for a different calculation.  */
static HARD_REG_SET temp_hard_regset;

#define last_mode_for_init_move_cost \
  (this_target_ira_int->x_last_mode_for_init_move_cost)
\f

/* The function sets up the map IRA_REG_MODE_HARD_REGSET.  */
static void
setup_reg_mode_hard_regset (void)
{
  int i, m, hard_regno;

  for (m = 0; m < NUM_MACHINE_MODES; m++)
    for (hard_regno = 0; hard_regno < FIRST_PSEUDO_REGISTER; hard_regno++)
      {
        CLEAR_HARD_REG_SET (ira_reg_mode_hard_regset[hard_regno][m]);
        for (i = hard_regno_nregs (hard_regno, (machine_mode) m) - 1;
             i >= 0; i--)
          if (hard_regno + i < FIRST_PSEUDO_REGISTER)
            SET_HARD_REG_BIT (ira_reg_mode_hard_regset[hard_regno][m],
                              hard_regno + i);
      }
}
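
/* As an illustration (an assumed multi-word layout, not checked
   against any particular target): for a hard register R and a mode M
   with hard_regno_nregs (R, M) == 2, ira_reg_mode_hard_regset[R][M]
   contains R and R + 1, i.e. every hard register occupied by a value
   of mode M starting in R.  */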

\f
#define no_unit_alloc_regs \
  (this_target_ira_int->x_no_unit_alloc_regs)

/* The function sets up the three arrays declared above.  */
static void
setup_class_hard_regs (void)
{
  int cl, i, hard_regno, n;
  HARD_REG_SET processed_hard_reg_set;

  ira_assert (SHRT_MAX >= FIRST_PSEUDO_REGISTER);
  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    {
      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
      CLEAR_HARD_REG_SET (processed_hard_reg_set);
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        {
          ira_non_ordered_class_hard_regs[cl][i] = -1;
          ira_class_hard_reg_index[cl][i] = -1;
        }
      for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        {
#ifdef REG_ALLOC_ORDER
          hard_regno = reg_alloc_order[i];
#else
          hard_regno = i;
#endif
          if (TEST_HARD_REG_BIT (processed_hard_reg_set, hard_regno))
            continue;
          SET_HARD_REG_BIT (processed_hard_reg_set, hard_regno);
          if (! TEST_HARD_REG_BIT (temp_hard_regset, hard_regno))
            ira_class_hard_reg_index[cl][hard_regno] = -1;
          else
            {
              ira_class_hard_reg_index[cl][hard_regno] = n;
              ira_class_hard_regs[cl][n++] = hard_regno;
            }
        }
      ira_class_hard_regs_num[cl] = n;
      for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        if (TEST_HARD_REG_BIT (temp_hard_regset, i))
          ira_non_ordered_class_hard_regs[cl][n++] = i;
      ira_assert (ira_class_hard_regs_num[cl] == n);
    }
}

/* Set up global variables defining info about hard registers for the
   allocation.  These depend on USE_HARD_FRAME_P whose TRUE value
   means that we can use the hard frame pointer for the allocation.  */
static void
setup_alloc_regs (bool use_hard_frame_p)
{
#ifdef ADJUST_REG_ALLOC_ORDER
  ADJUST_REG_ALLOC_ORDER;
#endif
  COPY_HARD_REG_SET (no_unit_alloc_regs, fixed_nonglobal_reg_set);
  if (! use_hard_frame_p)
    SET_HARD_REG_BIT (no_unit_alloc_regs, HARD_FRAME_POINTER_REGNUM);
  setup_class_hard_regs ();
}

\f

#define alloc_reg_class_subclasses \
  (this_target_ira_int->x_alloc_reg_class_subclasses)

/* Initialize the table of subclasses of each reg class.  */
static void
setup_reg_subclasses (void)
{
  int i, j;
  HARD_REG_SET temp_hard_regset2;

  for (i = 0; i < N_REG_CLASSES; i++)
    for (j = 0; j < N_REG_CLASSES; j++)
      alloc_reg_class_subclasses[i][j] = LIM_REG_CLASSES;

  for (i = 0; i < N_REG_CLASSES; i++)
    {
      if (i == (int) NO_REGS)
        continue;

      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
      if (hard_reg_set_empty_p (temp_hard_regset))
        continue;
      for (j = 0; j < N_REG_CLASSES; j++)
        if (i != j)
          {
            enum reg_class *p;

            COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[j]);
            AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
            if (! hard_reg_set_subset_p (temp_hard_regset,
                                         temp_hard_regset2))
              continue;
            p = &alloc_reg_class_subclasses[j][0];
            while (*p != LIM_REG_CLASSES) p++;
            *p = (enum reg_class) i;
          }
    }
}

\f

/* Set up IRA_MEMORY_MOVE_COST and IRA_MAX_MEMORY_MOVE_COST.  */
static void
setup_class_subset_and_memory_move_costs (void)
{
  int cl, cl2, mode, cost;
  HARD_REG_SET temp_hard_regset2;

  for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
    ira_memory_move_cost[mode][NO_REGS][0]
      = ira_memory_move_cost[mode][NO_REGS][1] = SHRT_MAX;
  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    {
      if (cl != (int) NO_REGS)
        for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
          {
            ira_max_memory_move_cost[mode][cl][0]
              = ira_memory_move_cost[mode][cl][0]
              = memory_move_cost ((machine_mode) mode,
                                  (reg_class_t) cl, false);
            ira_max_memory_move_cost[mode][cl][1]
              = ira_memory_move_cost[mode][cl][1]
              = memory_move_cost ((machine_mode) mode,
                                  (reg_class_t) cl, true);
            /* Costs for NO_REGS are used in cost calculation on the
               1st pass when the preferred register classes are not
               known yet.  In this case we take the best scenario.  */
            if (ira_memory_move_cost[mode][NO_REGS][0]
                > ira_memory_move_cost[mode][cl][0])
              ira_max_memory_move_cost[mode][NO_REGS][0]
                = ira_memory_move_cost[mode][NO_REGS][0]
                = ira_memory_move_cost[mode][cl][0];
            if (ira_memory_move_cost[mode][NO_REGS][1]
                > ira_memory_move_cost[mode][cl][1])
              ira_max_memory_move_cost[mode][NO_REGS][1]
                = ira_memory_move_cost[mode][NO_REGS][1]
                = ira_memory_move_cost[mode][cl][1];
          }
    }
  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    for (cl2 = (int) N_REG_CLASSES - 1; cl2 >= 0; cl2--)
      {
        COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
        AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
        COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl2]);
        AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
        ira_class_subset_p[cl][cl2]
          = hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2);
        if (! hard_reg_set_empty_p (temp_hard_regset2)
            && hard_reg_set_subset_p (reg_class_contents[cl2],
                                      reg_class_contents[cl]))
          for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
            {
              cost = ira_memory_move_cost[mode][cl2][0];
              if (cost > ira_max_memory_move_cost[mode][cl][0])
                ira_max_memory_move_cost[mode][cl][0] = cost;
              cost = ira_memory_move_cost[mode][cl2][1];
              if (cost > ira_max_memory_move_cost[mode][cl][1])
                ira_max_memory_move_cost[mode][cl][1] = cost;
            }
      }
  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
      {
        ira_memory_move_cost[mode][cl][0]
          = ira_max_memory_move_cost[mode][cl][0];
        ira_memory_move_cost[mode][cl][1]
          = ira_max_memory_move_cost[mode][cl][1];
      }
  setup_reg_subclasses ();
}

\f

/* Define the following macro if allocation through malloc is
   preferable.  */
#define IRA_NO_OBSTACK

#ifndef IRA_NO_OBSTACK
/* Obstack used for storing all dynamic data (except bitmaps) of the
   IRA.  */
static struct obstack ira_obstack;
#endif

/* Obstack used for storing all bitmaps of the IRA.  */
static struct bitmap_obstack ira_bitmap_obstack;

/* Allocate memory of size LEN for IRA data.  */
void *
ira_allocate (size_t len)
{
  void *res;

#ifndef IRA_NO_OBSTACK
  res = obstack_alloc (&ira_obstack, len);
#else
  res = xmalloc (len);
#endif
  return res;
}

/* Free memory ADDR allocated for IRA data.  */
void
ira_free (void *addr ATTRIBUTE_UNUSED)
{
#ifndef IRA_NO_OBSTACK
  /* do nothing */
#else
  free (addr);
#endif
}


/* Allocate and return a bitmap for IRA.  */
bitmap
ira_allocate_bitmap (void)
{
  return BITMAP_ALLOC (&ira_bitmap_obstack);
}

/* Free bitmap B allocated for IRA.  */
void
ira_free_bitmap (bitmap b ATTRIBUTE_UNUSED)
{
  /* do nothing */
}

\f

/* Output information about the allocation of all allocnos (except
   for caps) into file F.  */
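/* For illustration (the values are invented, not from a real dump),
   an output line looks like

        12:r345 b3    7   13:r346 l2   mem

   meaning allocno 12 for pseudo r345 in basic block 3 got hard
   register 7, while allocno 13 for pseudo r346 in loop 2 was
   assigned to memory.  */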
void
ira_print_disposition (FILE *f)
{
  int i, n, max_regno;
  ira_allocno_t a;
  basic_block bb;

  fprintf (f, "Disposition:");
  max_regno = max_reg_num ();
  for (n = 0, i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    for (a = ira_regno_allocno_map[i];
         a != NULL;
         a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
        if (n % 4 == 0)
          fprintf (f, "\n");
        n++;
        fprintf (f, " %4d:r%-4d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
        if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
          fprintf (f, "b%-3d", bb->index);
        else
          fprintf (f, "l%-3d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
        if (ALLOCNO_HARD_REGNO (a) >= 0)
          fprintf (f, " %3d", ALLOCNO_HARD_REGNO (a));
        else
          fprintf (f, " mem");
      }
  fprintf (f, "\n");
}

/* Output information about the allocation of all allocnos into
   stderr.  */
void
ira_debug_disposition (void)
{
  ira_print_disposition (stderr);
}

\f

/* Set up ira_stack_reg_pressure_class, which is the biggest pressure
   register class containing stack registers, or NO_REGS if there are
   no stack registers.  To find this class, we iterate through all
   register pressure classes and choose the first register pressure
   class containing all the stack registers and having the biggest
   size.  */
static void
setup_stack_reg_pressure_class (void)
{
  ira_stack_reg_pressure_class = NO_REGS;
#ifdef STACK_REGS
  {
    int i, best, size;
    enum reg_class cl;
    HARD_REG_SET temp_hard_regset2;

    CLEAR_HARD_REG_SET (temp_hard_regset);
    for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
      SET_HARD_REG_BIT (temp_hard_regset, i);
    best = 0;
    for (i = 0; i < ira_pressure_classes_num; i++)
      {
        cl = ira_pressure_classes[i];
        COPY_HARD_REG_SET (temp_hard_regset2, temp_hard_regset);
        AND_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
        size = hard_reg_set_size (temp_hard_regset2);
        if (best < size)
          {
            best = size;
            ira_stack_reg_pressure_class = cl;
          }
      }
  }
#endif
}

/* Find pressure classes, which are register classes for which we
   calculate register pressure in IRA, register pressure sensitive
   insn scheduling, and register pressure sensitive loop invariant
   motion.

   To make register pressure calculation easy, we always use
   non-intersecting register pressure classes.  A move of hard
   registers within one register pressure class is not more expensive
   than a load and store of the hard registers.  Most likely an
   allocno class will be a subset of a register pressure class and in
   many cases a register pressure class.  That makes usage of
   register pressure classes a good approximation for finding high
   register pressure.  */
static void
setup_pressure_classes (void)
{
  int cost, i, n, curr;
  int cl, cl2;
  enum reg_class pressure_classes[N_REG_CLASSES];
  int m;
  HARD_REG_SET temp_hard_regset2;
  bool insert_p;

  if (targetm.compute_pressure_classes)
    n = targetm.compute_pressure_classes (pressure_classes);
  else
    {
      n = 0;
      for (cl = 0; cl < N_REG_CLASSES; cl++)
        {
          if (ira_class_hard_regs_num[cl] == 0)
            continue;
          if (ira_class_hard_regs_num[cl] != 1
              /* A register class without subclasses may contain a few
                 hard registers and movement between them is costly
                 (e.g. SPARC FPCC registers).  We still should consider
                 it as a candidate for a pressure class.  */
              && alloc_reg_class_subclasses[cl][0] < cl)
            {
              /* Check that the moves between any hard registers of the
                 current class are not more expensive for a legal mode
                 than load/store of the hard registers of the current
                 class.  Such a class is a potential candidate to be a
                 register pressure class.  */
              for (m = 0; m < NUM_MACHINE_MODES; m++)
                {
                  COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
                  AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
                  AND_COMPL_HARD_REG_SET (temp_hard_regset,
                                          ira_prohibited_class_mode_regs[cl][m]);
                  if (hard_reg_set_empty_p (temp_hard_regset))
                    continue;
                  ira_init_register_move_cost_if_necessary ((machine_mode) m);
                  cost = ira_register_move_cost[m][cl][cl];
                  if (cost <= ira_max_memory_move_cost[m][cl][1]
                      || cost <= ira_max_memory_move_cost[m][cl][0])
                    break;
                }
              if (m >= NUM_MACHINE_MODES)
                continue;
            }
          curr = 0;
          insert_p = true;
          COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
          AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
          /* Remove so far added pressure classes which are subsets of
             the current candidate class.  Prefer GENERAL_REGS as a
             pressure register class to another class containing the
             same allocatable hard registers.  We do this because
             machine dependent cost hooks might give wrong costs for
             the latter class but always give the right cost for the
             former class (GENERAL_REGS).  */
          for (i = 0; i < n; i++)
            {
              cl2 = pressure_classes[i];
              COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl2]);
              AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
              if (hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2)
                  && (! hard_reg_set_equal_p (temp_hard_regset,
                                              temp_hard_regset2)
                      || cl2 == (int) GENERAL_REGS))
                {
                  pressure_classes[curr++] = (enum reg_class) cl2;
                  insert_p = false;
                  continue;
                }
              if (hard_reg_set_subset_p (temp_hard_regset2, temp_hard_regset)
                  && (! hard_reg_set_equal_p (temp_hard_regset2,
                                              temp_hard_regset)
                      || cl == (int) GENERAL_REGS))
                continue;
              if (hard_reg_set_equal_p (temp_hard_regset2, temp_hard_regset))
                insert_p = false;
              pressure_classes[curr++] = (enum reg_class) cl2;
            }
          /* If the current candidate is a subset of a so far added
             pressure class, don't add it to the list of the pressure
             classes.  */
          if (insert_p)
            pressure_classes[curr++] = (enum reg_class) cl;
          n = curr;
        }
    }
#ifdef ENABLE_IRA_CHECKING
  {
    HARD_REG_SET ignore_hard_regs;

    /* Check pressure classes correctness: here we check that the hard
       registers from all register pressure classes contain all hard
       registers available for the allocation.  */
    CLEAR_HARD_REG_SET (temp_hard_regset);
    CLEAR_HARD_REG_SET (temp_hard_regset2);
    COPY_HARD_REG_SET (ignore_hard_regs, no_unit_alloc_regs);
    for (cl = 0; cl < LIM_REG_CLASSES; cl++)
      {
        /* For some targets (like MIPS with MD_REGS), there are some
           classes with hard registers available for allocation but
           not able to hold a value of any mode.  */
        for (m = 0; m < NUM_MACHINE_MODES; m++)
          if (contains_reg_of_mode[cl][m])
            break;
        if (m >= NUM_MACHINE_MODES)
          {
            IOR_HARD_REG_SET (ignore_hard_regs, reg_class_contents[cl]);
            continue;
          }
        for (i = 0; i < n; i++)
          if ((int) pressure_classes[i] == cl)
            break;
        IOR_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
        if (i < n)
          IOR_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
      }
    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      /* Some targets (like SPARC with ICC reg) have allocatable regs
         for which no reg class is defined.  */
      if (REGNO_REG_CLASS (i) == NO_REGS)
        SET_HARD_REG_BIT (ignore_hard_regs, i);
    AND_COMPL_HARD_REG_SET (temp_hard_regset, ignore_hard_regs);
    AND_COMPL_HARD_REG_SET (temp_hard_regset2, ignore_hard_regs);
    ira_assert (hard_reg_set_subset_p (temp_hard_regset2, temp_hard_regset));
  }
#endif
  ira_pressure_classes_num = 0;
  for (i = 0; i < n; i++)
    {
      cl = (int) pressure_classes[i];
      ira_reg_pressure_class_p[cl] = true;
      ira_pressure_classes[ira_pressure_classes_num++] = (enum reg_class) cl;
    }
  setup_stack_reg_pressure_class ();
}

/* Set up IRA_UNIFORM_CLASS_P.  A uniform class is a register class
   whose register move cost between any registers of the class is the
   same as for all its subclasses.  We use the data to speed up the
   2nd pass of calculations of allocno costs.  */
static void
setup_uniform_class_p (void)
{
  int i, cl, cl2, m;

  for (cl = 0; cl < N_REG_CLASSES; cl++)
    {
      ira_uniform_class_p[cl] = false;
      if (ira_class_hard_regs_num[cl] == 0)
        continue;
      /* We cannot use alloc_reg_class_subclasses here because the
         move cost hooks do not take into account that some registers
         are unavailable for the subtarget.  E.g. for i686,
         INT_SSE_REGS is an element of alloc_reg_class_subclasses for
         GENERAL_REGS because SSE regs are unavailable.  */
      for (i = 0; (cl2 = reg_class_subclasses[cl][i]) != LIM_REG_CLASSES; i++)
        {
          if (ira_class_hard_regs_num[cl2] == 0)
            continue;
          for (m = 0; m < NUM_MACHINE_MODES; m++)
            if (contains_reg_of_mode[cl][m] && contains_reg_of_mode[cl2][m])
              {
                ira_init_register_move_cost_if_necessary ((machine_mode) m);
                if (ira_register_move_cost[m][cl][cl]
                    != ira_register_move_cost[m][cl2][cl2])
                  break;
              }
          if (m < NUM_MACHINE_MODES)
            break;
        }
      if (cl2 == LIM_REG_CLASSES)
        ira_uniform_class_p[cl] = true;
    }
}

/* Set up IRA_ALLOCNO_CLASSES, IRA_ALLOCNO_CLASSES_NUM,
   IRA_IMPORTANT_CLASSES, and IRA_IMPORTANT_CLASSES_NUM.

   A target may have many subtargets, and not all target hard
   registers can be used for allocation, e.g. the x86 port in 32-bit
   mode cannot use the hard registers introduced in x86-64 like
   r8-r15.  Some classes might have the same allocatable hard
   registers, e.g. INDEX_REGS and GENERAL_REGS in the x86 port in
   32-bit mode.  To reduce the effort of various calculations, we
   introduce allocno classes which contain unique non-empty sets of
   allocatable hard registers.

   Pseudo class cost calculation in ira-costs.c is very expensive.
   Therefore we are trying to decrease the number of classes involved
   in such calculation.  Register classes used in the cost
   calculation are called important classes.  They are allocno
   classes and other non-empty classes whose allocatable hard
   register sets are inside an allocno class hard register set.  At
   first sight it looks like they are just allocno classes, but that
   is not true.  In the example of the x86 port in 32-bit mode, the
   allocno classes will contain GENERAL_REGS but not LEGACY_REGS
   (because the allocatable hard registers are the same for both
   classes).  The important classes will contain GENERAL_REGS and
   LEGACY_REGS.  This is done because a machine description insn
   constraint may refer to LEGACY_REGS, and the code in ira-costs.c
   is mostly based on investigation of the insn constraints.  */
static void
setup_allocno_and_important_classes (void)
{
  int i, j, n, cl;
  bool set_p;
  HARD_REG_SET temp_hard_regset2;
  static enum reg_class classes[LIM_REG_CLASSES + 1];

  n = 0;
  /* Collect classes which contain unique sets of allocatable hard
     registers.  Prefer GENERAL_REGS to other classes containing the
     same set of hard registers.  */
  for (i = 0; i < LIM_REG_CLASSES; i++)
    {
      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
      for (j = 0; j < n; j++)
        {
          cl = classes[j];
          COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
          AND_COMPL_HARD_REG_SET (temp_hard_regset2,
                                  no_unit_alloc_regs);
          if (hard_reg_set_equal_p (temp_hard_regset,
                                    temp_hard_regset2))
            break;
        }
      if (j >= n || targetm.additional_allocno_class_p (i))
        classes[n++] = (enum reg_class) i;
      else if (i == GENERAL_REGS)
        /* Prefer general regs.  For the i386 example, it means that
           we prefer GENERAL_REGS over INDEX_REGS or LEGACY_REGS (all
           of them consist of the same available hard registers).  */
        classes[j] = (enum reg_class) i;
    }
  classes[n] = LIM_REG_CLASSES;

  /* Set up classes which can be used for allocnos as classes
     containing non-empty unique sets of allocatable hard
     registers.  */
  ira_allocno_classes_num = 0;
  for (i = 0; (cl = classes[i]) != LIM_REG_CLASSES; i++)
    if (ira_class_hard_regs_num[cl] > 0)
      ira_allocno_classes[ira_allocno_classes_num++] = (enum reg_class) cl;
  ira_important_classes_num = 0;
  /* Add non-allocno classes containing a non-empty set of
     allocatable hard regs.  */
  for (cl = 0; cl < N_REG_CLASSES; cl++)
    if (ira_class_hard_regs_num[cl] > 0)
      {
        COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
        AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
        set_p = false;
        for (j = 0; j < ira_allocno_classes_num; j++)
          {
            COPY_HARD_REG_SET (temp_hard_regset2,
                               reg_class_contents[ira_allocno_classes[j]]);
            AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
            if ((enum reg_class) cl == ira_allocno_classes[j])
              break;
            else if (hard_reg_set_subset_p (temp_hard_regset,
                                            temp_hard_regset2))
              set_p = true;
          }
        if (set_p && j >= ira_allocno_classes_num)
          ira_important_classes[ira_important_classes_num++]
            = (enum reg_class) cl;
      }
  /* Now add allocno classes to the important classes.  */
  for (j = 0; j < ira_allocno_classes_num; j++)
    ira_important_classes[ira_important_classes_num++]
      = ira_allocno_classes[j];
  for (cl = 0; cl < N_REG_CLASSES; cl++)
    {
      ira_reg_allocno_class_p[cl] = false;
      ira_reg_pressure_class_p[cl] = false;
    }
  for (j = 0; j < ira_allocno_classes_num; j++)
    ira_reg_allocno_class_p[ira_allocno_classes[j]] = true;
  setup_pressure_classes ();
  setup_uniform_class_p ();
}

/* Set up the translation in CLASS_TRANSLATE of all classes into a
   class given by the array CLASSES of length CLASSES_NUM.  The
   function is used to translate any reg class into an allocno class
   or a pressure class.  This translation is necessary for some
   calculations when we can use only allocno or pressure classes, and
   such a translation represents an approximate representation of all
   classes.

   The translation in the case when the allocatable hard register set
   of a given class is a subset of the allocatable hard register set
   of a class in CLASSES is pretty simple: we use the smallest class
   from CLASSES containing the given class.  If the allocatable hard
   register set of a given class is not a subset of any corresponding
   set of a class from CLASSES, we use the cheapest (from the
   load/store point of view) class from CLASSES whose set intersects
   with the given class's set.  */
static void
setup_class_translate_array (enum reg_class *class_translate,
                             int classes_num, enum reg_class *classes)
{
  int cl, mode;
  enum reg_class aclass, best_class, *cl_ptr;
  int i, cost, min_cost, best_cost;

  for (cl = 0; cl < N_REG_CLASSES; cl++)
    class_translate[cl] = NO_REGS;

  for (i = 0; i < classes_num; i++)
    {
      aclass = classes[i];
      for (cl_ptr = &alloc_reg_class_subclasses[aclass][0];
           (cl = *cl_ptr) != LIM_REG_CLASSES;
           cl_ptr++)
        if (class_translate[cl] == NO_REGS)
          class_translate[cl] = aclass;
      class_translate[aclass] = aclass;
    }
  /* For classes which are not fully covered by one of the given
     classes (in other words, covered by more than one given class),
     use the cheapest class.  */
  for (cl = 0; cl < N_REG_CLASSES; cl++)
    {
      if (cl == NO_REGS || class_translate[cl] != NO_REGS)
        continue;
      best_class = NO_REGS;
      best_cost = INT_MAX;
      for (i = 0; i < classes_num; i++)
        {
          aclass = classes[i];
          COPY_HARD_REG_SET (temp_hard_regset,
                             reg_class_contents[aclass]);
          AND_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
          AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
          if (! hard_reg_set_empty_p (temp_hard_regset))
            {
              min_cost = INT_MAX;
              for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
                {
                  cost = (ira_memory_move_cost[mode][aclass][0]
                          + ira_memory_move_cost[mode][aclass][1]);
                  if (min_cost > cost)
                    min_cost = cost;
                }
              if (best_class == NO_REGS || best_cost > min_cost)
                {
                  best_class = aclass;
                  best_cost = min_cost;
                }
            }
        }
      class_translate[cl] = best_class;
    }
}

/* Set up the arrays IRA_ALLOCNO_CLASS_TRANSLATE and
   IRA_PRESSURE_CLASS_TRANSLATE.  */
static void
setup_class_translate (void)
{
  setup_class_translate_array (ira_allocno_class_translate,
                               ira_allocno_classes_num, ira_allocno_classes);
  setup_class_translate_array (ira_pressure_class_translate,
                               ira_pressure_classes_num, ira_pressure_classes);
}

/* Order numbers of allocno classes in the original target allocno
   class array, -1 for non-allocno classes.  */
static int allocno_class_order[N_REG_CLASSES];

/* The function used to sort the important classes.  */
static int
comp_reg_classes_func (const void *v1p, const void *v2p)
{
  enum reg_class cl1 = *(const enum reg_class *) v1p;
  enum reg_class cl2 = *(const enum reg_class *) v2p;
  enum reg_class tcl1, tcl2;
  int diff;

  tcl1 = ira_allocno_class_translate[cl1];
  tcl2 = ira_allocno_class_translate[cl2];
  if (tcl1 != NO_REGS && tcl2 != NO_REGS
      && (diff = allocno_class_order[tcl1] - allocno_class_order[tcl2]) != 0)
    return diff;
  return (int) cl1 - (int) cl2;
}

1756cb66 VM |
1178 | /* For correct work of function setup_reg_class_relation we need to |
1179 | reorder important classes according to the order of their allocno | |
1180 | classes. It places important classes containing the same | |
1181 | allocatable hard register set adjacent to each other and allocno | |
1182 | class with the allocatable hard register set right after the other | |
1183 | important classes with the same set. | |
1184 | ||
1185 | In example from comments of function | |
1186 | setup_allocno_and_important_classes, it places LEGACY_REGS and | |
1187 | GENERAL_REGS close to each other and GENERAL_REGS is after | |
1188 | LEGACY_REGS. */ | |
db1a8d98 VM |
1189 | static void |
1190 | reorder_important_classes (void) | |
1191 | { | |
1192 | int i; | |
1193 | ||
1194 | for (i = 0; i < N_REG_CLASSES; i++) | |
1756cb66 VM |
1195 | allocno_class_order[i] = -1; |
1196 | for (i = 0; i < ira_allocno_classes_num; i++) | |
1197 | allocno_class_order[ira_allocno_classes[i]] = i; | |
db1a8d98 VM |
1198 | qsort (ira_important_classes, ira_important_classes_num, |
1199 | sizeof (enum reg_class), comp_reg_classes_func); | |
1756cb66 VM |
1200 | for (i = 0; i < ira_important_classes_num; i++) |
1201 | ira_important_class_nums[ira_important_classes[i]] = i; | |
db1a8d98 VM |
1202 | } |

/* Set up IRA_REG_CLASS_SUBUNION, IRA_REG_CLASS_SUPERUNION,
   IRA_REG_CLASS_SUPER_CLASSES, IRA_REG_CLASS_SUBSET,
   IRA_REG_CLASSES_INTERSECT, and IRA_REG_CLASSES_INTERSECT_P.  For
   the meaning of the relations, please see corresponding comments in
   ira-int.h.  */
static void
setup_reg_class_relations (void)
{
  int i, cl1, cl2, cl3;
  HARD_REG_SET intersection_set, union_set, temp_set2;
  bool important_class_p[N_REG_CLASSES];

  memset (important_class_p, 0, sizeof (important_class_p));
  for (i = 0; i < ira_important_classes_num; i++)
    important_class_p[ira_important_classes[i]] = true;
  for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
    {
      ira_reg_class_super_classes[cl1][0] = LIM_REG_CLASSES;
      for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
        {
          ira_reg_classes_intersect_p[cl1][cl2] = false;
          ira_reg_class_intersect[cl1][cl2] = NO_REGS;
          ira_reg_class_subset[cl1][cl2] = NO_REGS;
          COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl1]);
          AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
          COPY_HARD_REG_SET (temp_set2, reg_class_contents[cl2]);
          AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
          if (hard_reg_set_empty_p (temp_hard_regset)
              && hard_reg_set_empty_p (temp_set2))
            {
              /* Neither class has allocatable hard registers -- take
                 all class hard registers into account and use
                 reg_class_subunion and reg_class_superunion.  */
              for (i = 0;; i++)
                {
                  cl3 = reg_class_subclasses[cl1][i];
                  if (cl3 == LIM_REG_CLASSES)
                    break;
                  if (reg_class_subset_p (ira_reg_class_intersect[cl1][cl2],
                                          (enum reg_class) cl3))
                    ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
                }
              ira_reg_class_subunion[cl1][cl2] = reg_class_subunion[cl1][cl2];
              ira_reg_class_superunion[cl1][cl2] = reg_class_superunion[cl1][cl2];
              continue;
            }
          ira_reg_classes_intersect_p[cl1][cl2]
            = hard_reg_set_intersect_p (temp_hard_regset, temp_set2);
          if (important_class_p[cl1] && important_class_p[cl2]
              && hard_reg_set_subset_p (temp_hard_regset, temp_set2))
            {
              /* CL1 and CL2 are important classes and the allocatable
                 hard register set of CL1 is inside that of CL2 --
                 record CL2 among the super classes of CL1.  */
              enum reg_class *p;

              p = &ira_reg_class_super_classes[cl1][0];
              while (*p != LIM_REG_CLASSES)
                p++;
              *p++ = (enum reg_class) cl2;
              *p = LIM_REG_CLASSES;
            }
          ira_reg_class_subunion[cl1][cl2] = NO_REGS;
          ira_reg_class_superunion[cl1][cl2] = NO_REGS;
          COPY_HARD_REG_SET (intersection_set, reg_class_contents[cl1]);
          AND_HARD_REG_SET (intersection_set, reg_class_contents[cl2]);
          AND_COMPL_HARD_REG_SET (intersection_set, no_unit_alloc_regs);
          COPY_HARD_REG_SET (union_set, reg_class_contents[cl1]);
          IOR_HARD_REG_SET (union_set, reg_class_contents[cl2]);
          AND_COMPL_HARD_REG_SET (union_set, no_unit_alloc_regs);
          for (cl3 = 0; cl3 < N_REG_CLASSES; cl3++)
            {
              COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl3]);
              AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
              if (hard_reg_set_subset_p (temp_hard_regset, intersection_set))
                {
                  /* CL3 allocatable hard register set is inside of
                     intersection of allocatable hard register sets
                     of CL1 and CL2.  */
                  if (important_class_p[cl3])
                    {
                      COPY_HARD_REG_SET
                        (temp_set2,
                         reg_class_contents
                         [(int) ira_reg_class_intersect[cl1][cl2]]);
                      AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
                      if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2)
                          /* If the allocatable hard register sets are
                             the same, prefer GENERAL_REGS or the
                             smallest class for debugging
                             purposes.  */
                          || (hard_reg_set_equal_p (temp_hard_regset, temp_set2)
                              && (cl3 == GENERAL_REGS
                                  || ((ira_reg_class_intersect[cl1][cl2]
                                       != GENERAL_REGS)
                                      && hard_reg_set_subset_p
                                         (reg_class_contents[cl3],
                                          reg_class_contents
                                          [(int)
                                           ira_reg_class_intersect[cl1][cl2]])))))
                        ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
                    }
                  COPY_HARD_REG_SET
                    (temp_set2,
                     reg_class_contents[(int) ira_reg_class_subset[cl1][cl2]]);
                  AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
                  if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2)
                      /* Ignore unavailable hard registers and prefer
                         smallest class for debugging purposes.  */
                      || (hard_reg_set_equal_p (temp_hard_regset, temp_set2)
                          && hard_reg_set_subset_p
                             (reg_class_contents[cl3],
                              reg_class_contents
                              [(int) ira_reg_class_subset[cl1][cl2]])))
                    ira_reg_class_subset[cl1][cl2] = (enum reg_class) cl3;
                }
              if (important_class_p[cl3]
                  && hard_reg_set_subset_p (temp_hard_regset, union_set))
                {
                  /* CL3 allocatable hard register set is inside of
                     union of allocatable hard register sets of CL1
                     and CL2.  */
                  COPY_HARD_REG_SET
                    (temp_set2,
                     reg_class_contents[(int) ira_reg_class_subunion[cl1][cl2]]);
                  AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
                  if (ira_reg_class_subunion[cl1][cl2] == NO_REGS
                      || (hard_reg_set_subset_p (temp_set2, temp_hard_regset)
                          && (! hard_reg_set_equal_p (temp_set2,
                                                      temp_hard_regset)
                              || cl3 == GENERAL_REGS
                              /* If the allocatable hard register sets are the
                                 same, prefer GENERAL_REGS or the smallest
                                 class for debugging purposes.  */
                              || (ira_reg_class_subunion[cl1][cl2] != GENERAL_REGS
                                  && hard_reg_set_subset_p
                                     (reg_class_contents[cl3],
                                      reg_class_contents
                                      [(int) ira_reg_class_subunion[cl1][cl2]])))))
                    ira_reg_class_subunion[cl1][cl2] = (enum reg_class) cl3;
                }
              if (hard_reg_set_subset_p (union_set, temp_hard_regset))
                {
                  /* CL3 allocatable hard register set contains union
                     of allocatable hard register sets of CL1 and
                     CL2.  */
                  COPY_HARD_REG_SET
                    (temp_set2,
                     reg_class_contents[(int) ira_reg_class_superunion[cl1][cl2]]);
                  AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
                  if (ira_reg_class_superunion[cl1][cl2] == NO_REGS
                      || (hard_reg_set_subset_p (temp_hard_regset, temp_set2)
                          && (! hard_reg_set_equal_p (temp_set2,
                                                      temp_hard_regset)
                              || cl3 == GENERAL_REGS
                              /* If the allocatable hard register sets are the
                                 same, prefer GENERAL_REGS or the smallest
                                 class for debugging purposes.  */
                              || (ira_reg_class_superunion[cl1][cl2] != GENERAL_REGS
                                  && hard_reg_set_subset_p
                                     (reg_class_contents[cl3],
                                      reg_class_contents
                                      [(int) ira_reg_class_superunion[cl1][cl2]])))))
                    ira_reg_class_superunion[cl1][cl2] = (enum reg_class) cl3;
                }
            }
        }
    }
}
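
#if 0
/* Illustrative sketch only -- not part of IRA.  Assuming two
   hypothetical important classes A_REGS and B_REGS with overlapping
   allocatable sets, the tables built above answer queries such as
   the following.  */
static void
sketch_query_class_relations (void)
{
  /* True iff the allocatable sets of the two classes overlap.  */
  bool overlap = ira_reg_classes_intersect_p[A_REGS][B_REGS];
  /* The biggest class whose allocatable registers all lie in the
     intersection of the two allocatable sets.  */
  enum reg_class isect = ira_reg_class_intersect[A_REGS][B_REGS];
  /* The biggest class inside the union of the two allocatable sets,
     and the smallest class containing that union.  */
  enum reg_class sub = ira_reg_class_subunion[A_REGS][B_REGS];
  enum reg_class super = ira_reg_class_superunion[A_REGS][B_REGS];
}
#endif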

/* Output all uniform and important classes into file F.  */
static void
print_uniform_and_important_classes (FILE *f)
{
  int i, cl;

  fprintf (f, "Uniform classes:\n");
  for (cl = 0; cl < N_REG_CLASSES; cl++)
    if (ira_uniform_class_p[cl])
      fprintf (f, " %s", reg_class_names[cl]);
  fprintf (f, "\nImportant classes:\n");
  for (i = 0; i < ira_important_classes_num; i++)
    fprintf (f, " %s", reg_class_names[ira_important_classes[i]]);
  fprintf (f, "\n");
}

/* Output all possible allocno or pressure classes and their
   translation map into file F.  */
static void
print_translated_classes (FILE *f, bool pressure_p)
{
  int classes_num = (pressure_p
                     ? ira_pressure_classes_num : ira_allocno_classes_num);
  enum reg_class *classes = (pressure_p
                             ? ira_pressure_classes : ira_allocno_classes);
  enum reg_class *class_translate = (pressure_p
                                     ? ira_pressure_class_translate
                                     : ira_allocno_class_translate);
  int i;

  fprintf (f, "%s classes:\n", pressure_p ? "Pressure" : "Allocno");
  for (i = 0; i < classes_num; i++)
    fprintf (f, " %s", reg_class_names[classes[i]]);
  fprintf (f, "\nClass translation:\n");
  for (i = 0; i < N_REG_CLASSES; i++)
    fprintf (f, " %s -> %s\n", reg_class_names[i],
             reg_class_names[class_translate[i]]);
}

/* Output all possible allocno and pressure classes and their
   translation maps into stderr.  */
void
ira_debug_allocno_classes (void)
{
  print_uniform_and_important_classes (stderr);
  print_translated_classes (stderr, false);
  print_translated_classes (stderr, true);
}

/* Set up different arrays concerning class subsets, allocno and
   important classes.  */
static void
find_reg_classes (void)
{
  setup_allocno_and_important_classes ();
  setup_class_translate ();
  reorder_important_classes ();
  setup_reg_class_relations ();
}

\f

/* Set up the array IRA_HARD_REGNO_ALLOCNO_CLASS.  */
static void
setup_hard_regno_aclass (void)
{
  int i;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
#if 1
      ira_hard_regno_allocno_class[i]
        = (TEST_HARD_REG_BIT (no_unit_alloc_regs, i)
           ? NO_REGS
           : ira_allocno_class_translate[REGNO_REG_CLASS (i)]);
#else
      int j;
      enum reg_class cl;

      ira_hard_regno_allocno_class[i] = NO_REGS;
      for (j = 0; j < ira_allocno_classes_num; j++)
        {
          cl = ira_allocno_classes[j];
          if (ira_class_hard_reg_index[cl][i] >= 0)
            {
              ira_hard_regno_allocno_class[i] = cl;
              break;
            }
        }
#endif
    }
}

\f

/* Form IRA_REG_CLASS_MAX_NREGS and IRA_REG_CLASS_MIN_NREGS maps.  */
static void
setup_reg_class_nregs (void)
{
  int i, cl, cl2, m;

  for (m = 0; m < MAX_MACHINE_MODE; m++)
    {
      for (cl = 0; cl < N_REG_CLASSES; cl++)
        ira_reg_class_max_nregs[cl][m]
          = ira_reg_class_min_nregs[cl][m]
          = targetm.class_max_nregs ((reg_class_t) cl, (machine_mode) m);
      for (cl = 0; cl < N_REG_CLASSES; cl++)
        for (i = 0;
             (cl2 = alloc_reg_class_subclasses[cl][i]) != LIM_REG_CLASSES;
             i++)
          if (ira_reg_class_min_nregs[cl2][m]
              < ira_reg_class_min_nregs[cl][m])
            ira_reg_class_min_nregs[cl][m] = ira_reg_class_min_nregs[cl2][m];
    }
}
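
#if 0
/* Illustrative sketch only -- not part of IRA.  On a hypothetical
   32-bit target whose general registers are 4 bytes wide, an 8-byte
   DImode value occupies two of them, so the map built above gives:

     ira_reg_class_max_nregs[GENERAL_REGS][DImode] == 2

   ira_reg_class_min_nregs can be smaller than the maximum when some
   allocatable subclass holds the mode in fewer registers.  */
#endif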

\f

/* Set up IRA_PROHIBITED_CLASS_MODE_REGS and IRA_CLASS_SINGLETON.
   This function is called once IRA_CLASS_HARD_REGS has been initialized.  */
static void
setup_prohibited_class_mode_regs (void)
{
  int j, k, hard_regno, cl, last_hard_regno, count;

  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    {
      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
      for (j = 0; j < NUM_MACHINE_MODES; j++)
        {
          count = 0;
          last_hard_regno = -1;
          CLEAR_HARD_REG_SET (ira_prohibited_class_mode_regs[cl][j]);
          for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--)
            {
              hard_regno = ira_class_hard_regs[cl][k];
              if (!targetm.hard_regno_mode_ok (hard_regno, (machine_mode) j))
                SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
                                  hard_regno);
              else if (in_hard_reg_set_p (temp_hard_regset,
                                          (machine_mode) j, hard_regno))
                {
                  last_hard_regno = hard_regno;
                  count++;
                }
            }
          ira_class_singleton[cl][j] = (count == 1 ? last_hard_regno : -1);
        }
    }
}
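
#if 0
/* Illustrative sketch only -- not part of IRA.  ira_class_singleton
   caches the sole hard register of a class that can hold a mode when
   exactly one such register exists, so a consumer can short-circuit
   its choice.  */
static int
sketch_forced_hard_regno (enum reg_class cl, machine_mode mode)
{
  /* Negative means there is no single forced choice.  */
  return ira_class_singleton[cl][(int) mode];
}
#endif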

/* Clarify IRA_PROHIBITED_CLASS_MODE_REGS by excluding hard registers
   that span from one register pressure class into another.  It is
   called after the pressure classes are defined.  */
static void
clarify_prohibited_class_mode_regs (void)
{
  int j, k, hard_regno, cl, pclass, nregs;

  for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
    for (j = 0; j < NUM_MACHINE_MODES; j++)
      {
        CLEAR_HARD_REG_SET (ira_useful_class_mode_regs[cl][j]);
        for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--)
          {
            hard_regno = ira_class_hard_regs[cl][k];
            if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
                                   hard_regno))
              continue;
            nregs = hard_regno_nregs (hard_regno, (machine_mode) j);
            if (hard_regno + nregs > FIRST_PSEUDO_REGISTER)
              {
                SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
                                  hard_regno);
                continue;
              }
            pclass = ira_pressure_class_translate[REGNO_REG_CLASS (hard_regno)];
            for (nregs--; nregs >= 0; nregs--)
              if (((enum reg_class) pclass
                   != ira_pressure_class_translate[REGNO_REG_CLASS
                                                   (hard_regno + nregs)]))
                {
                  SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
                                    hard_regno);
                  break;
                }
            if (!TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
                                    hard_regno))
              add_to_hard_reg_set (&ira_useful_class_mode_regs[cl][j],
                                   (machine_mode) j, hard_regno);
          }
      }
}
\f
/* Allocate and initialize IRA_REGISTER_MOVE_COST, IRA_MAY_MOVE_IN_COST
   and IRA_MAY_MOVE_OUT_COST for MODE.  */
void
ira_init_register_move_cost (machine_mode mode)
{
  static unsigned short last_move_cost[N_REG_CLASSES][N_REG_CLASSES];
  bool all_match = true;
  unsigned int i, cl1, cl2;
  HARD_REG_SET ok_regs;

  ira_assert (ira_register_move_cost[mode] == NULL
              && ira_may_move_in_cost[mode] == NULL
              && ira_may_move_out_cost[mode] == NULL);
  CLEAR_HARD_REG_SET (ok_regs);
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (targetm.hard_regno_mode_ok (i, mode))
      SET_HARD_REG_BIT (ok_regs, i);

  /* Note that we might be asked about the move costs of modes that
     cannot be stored in any hard register, for example if an inline
     asm tries to create a register operand with an impossible mode.
     We therefore can't assert have_regs_of_mode[mode] here.  */
  for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
    for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
      {
        int cost;
        if (!hard_reg_set_intersect_p (ok_regs, reg_class_contents[cl1])
            || !hard_reg_set_intersect_p (ok_regs, reg_class_contents[cl2]))
          {
            if ((ira_reg_class_max_nregs[cl1][mode]
                 > ira_class_hard_regs_num[cl1])
                || (ira_reg_class_max_nregs[cl2][mode]
                    > ira_class_hard_regs_num[cl2]))
              cost = 65535;
            else
              cost = (ira_memory_move_cost[mode][cl1][0]
                      + ira_memory_move_cost[mode][cl2][1]) * 2;
          }
        else
          {
            cost = register_move_cost (mode, (enum reg_class) cl1,
                                       (enum reg_class) cl2);
            ira_assert (cost < 65535);
          }
        all_match &= (last_move_cost[cl1][cl2] == cost);
        last_move_cost[cl1][cl2] = cost;
      }
  if (all_match && last_mode_for_init_move_cost != -1)
    {
      ira_register_move_cost[mode]
        = ira_register_move_cost[last_mode_for_init_move_cost];
      ira_may_move_in_cost[mode]
        = ira_may_move_in_cost[last_mode_for_init_move_cost];
      ira_may_move_out_cost[mode]
        = ira_may_move_out_cost[last_mode_for_init_move_cost];
      return;
    }
  last_mode_for_init_move_cost = mode;
  ira_register_move_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES);
  ira_may_move_in_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES);
  ira_may_move_out_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES);
  for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
    for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
      {
        int cost;
        enum reg_class *p1, *p2;

        if (last_move_cost[cl1][cl2] == 65535)
          {
            ira_register_move_cost[mode][cl1][cl2] = 65535;
            ira_may_move_in_cost[mode][cl1][cl2] = 65535;
            ira_may_move_out_cost[mode][cl1][cl2] = 65535;
          }
        else
          {
            cost = last_move_cost[cl1][cl2];

            for (p2 = &reg_class_subclasses[cl2][0];
                 *p2 != LIM_REG_CLASSES; p2++)
              if (ira_class_hard_regs_num[*p2] > 0
                  && (ira_reg_class_max_nregs[*p2][mode]
                      <= ira_class_hard_regs_num[*p2]))
                cost = MAX (cost, ira_register_move_cost[mode][cl1][*p2]);

            for (p1 = &reg_class_subclasses[cl1][0];
                 *p1 != LIM_REG_CLASSES; p1++)
              if (ira_class_hard_regs_num[*p1] > 0
                  && (ira_reg_class_max_nregs[*p1][mode]
                      <= ira_class_hard_regs_num[*p1]))
                cost = MAX (cost, ira_register_move_cost[mode][*p1][cl2]);

            ira_assert (cost <= 65535);
            ira_register_move_cost[mode][cl1][cl2] = cost;

            if (ira_class_subset_p[cl1][cl2])
              ira_may_move_in_cost[mode][cl1][cl2] = 0;
            else
              ira_may_move_in_cost[mode][cl1][cl2] = cost;

            if (ira_class_subset_p[cl2][cl1])
              ira_may_move_out_cost[mode][cl1][cl2] = 0;
            else
              ira_may_move_out_cost[mode][cl1][cl2] = cost;
          }
      }
}
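
#if 0
/* Illustrative sketch only -- not part of IRA.  65535 serves above as
   an "infinite" cost for class/mode pairs that cannot hold the mode
   at all; a consumer treats it as a barrier and goes through memory
   instead.  Note also the sharing: when a mode yields exactly the
   same table as a previously initialized mode, the same allocation is
   reused (see the all_match check).  */
static bool
sketch_move_possible_p (machine_mode mode, enum reg_class cl1,
                        enum reg_class cl2)
{
  return ira_register_move_cost[mode][cl1][cl2] != 65535;
}
#endif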

\f

/* This is called once during compiler work.  It sets up different
   arrays whose values don't depend on the function being compiled.  */
void
ira_init_once (void)
{
  ira_init_costs_once ();
  lra_init_once ();

  ira_use_lra_p = targetm.lra_p ();
}

/* Free x_ira_register_move_cost, x_ira_may_move_in_cost and
   x_ira_may_move_out_cost for each mode.  */
void
target_ira_int::free_register_move_costs (void)
{
  int mode, i;

  /* Reset move_cost and friends, making sure we only free shared
     table entries once.  */
  for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
    if (x_ira_register_move_cost[mode])
      {
        for (i = 0;
             i < mode && (x_ira_register_move_cost[i]
                          != x_ira_register_move_cost[mode]);
             i++)
          ;
        if (i == mode)
          {
            free (x_ira_register_move_cost[mode]);
            free (x_ira_may_move_in_cost[mode]);
            free (x_ira_may_move_out_cost[mode]);
          }
      }
  memset (x_ira_register_move_cost, 0, sizeof x_ira_register_move_cost);
  memset (x_ira_may_move_in_cost, 0, sizeof x_ira_may_move_in_cost);
  memset (x_ira_may_move_out_cost, 0, sizeof x_ira_may_move_out_cost);
  last_mode_for_init_move_cost = -1;
}

target_ira_int::~target_ira_int ()
{
  free_ira_costs ();
  free_register_move_costs ();
}

/* This is called every time register-related information is
   changed.  */
void
ira_init (void)
{
  this_target_ira_int->free_register_move_costs ();
  setup_reg_mode_hard_regset ();
  setup_alloc_regs (flag_omit_frame_pointer != 0);
  setup_class_subset_and_memory_move_costs ();
  setup_reg_class_nregs ();
  setup_prohibited_class_mode_regs ();
  find_reg_classes ();
  clarify_prohibited_class_mode_regs ();
  setup_hard_regno_aclass ();
  ira_init_costs ();
}

\f
#define ira_prohibited_mode_move_regs_initialized_p \
  (this_target_ira_int->x_ira_prohibited_mode_move_regs_initialized_p)

/* Set up IRA_PROHIBITED_MODE_MOVE_REGS.  */
static void
setup_prohibited_mode_move_regs (void)
{
  int i, j;
  rtx test_reg1, test_reg2, move_pat;
  rtx_insn *move_insn;

  if (ira_prohibited_mode_move_regs_initialized_p)
    return;
  ira_prohibited_mode_move_regs_initialized_p = true;
  test_reg1 = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
  test_reg2 = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 2);
  move_pat = gen_rtx_SET (test_reg1, test_reg2);
  move_insn = gen_rtx_INSN (VOIDmode, 0, 0, 0, move_pat, 0, -1, 0);
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      SET_HARD_REG_SET (ira_prohibited_mode_move_regs[i]);
      for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
        {
          if (!targetm.hard_regno_mode_ok (j, (machine_mode) i))
            continue;
          set_mode_and_regno (test_reg1, (machine_mode) i, j);
          set_mode_and_regno (test_reg2, (machine_mode) i, j);
          INSN_CODE (move_insn) = -1;
          recog_memoized (move_insn);
          if (INSN_CODE (move_insn) < 0)
            continue;
          extract_insn (move_insn);
          /* We don't know whether the move will be in code that is optimized
             for size or speed, so consider all enabled alternatives.  */
          if (! constrain_operands (1, get_enabled_alternatives (move_insn)))
            continue;
          CLEAR_HARD_REG_BIT (ira_prohibited_mode_move_regs[i], j);
        }
    }
}
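
#if 0
/* Illustrative sketch only -- not part of IRA.  The function above
   probes the target by building a dummy reg-reg move and asking
   recog whether it is recognizable; the result is consulted like
   this.  */
static bool
sketch_mode_move_ok_p (machine_mode mode, int hard_regno)
{
  return !TEST_HARD_REG_BIT (ira_prohibited_mode_move_regs[(int) mode],
                             hard_regno);
}
#endif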

\f

/* Extract INSN and return the set of alternatives that we should consider.
   This excludes any alternatives whose constraints are obviously impossible
   to meet (e.g. because the constraint requires a constant and the operand
   is nonconstant).  It also excludes alternatives that are bound to need
   a spill or reload, as long as we have other alternatives that match
   exactly.  */
alternative_mask
ira_setup_alts (rtx_insn *insn)
{
  int nop, nalt;
  bool curr_swapped;
  const char *p;
  int commutative = -1;

  extract_insn (insn);
  preprocess_constraints (insn);
  alternative_mask preferred = get_preferred_alternatives (insn);
  alternative_mask alts = 0;
  alternative_mask exact_alts = 0;
  /* Check that the hard reg set is enough for holding all
     alternatives.  It is hard to imagine the situation when the
     assertion is wrong.  */
  ira_assert (recog_data.n_alternatives
              <= (int) MAX (sizeof (HARD_REG_ELT_TYPE) * CHAR_BIT,
                            FIRST_PSEUDO_REGISTER));
  for (nop = 0; nop < recog_data.n_operands; nop++)
    if (recog_data.constraints[nop][0] == '%')
      {
        commutative = nop;
        break;
      }
  for (curr_swapped = false;; curr_swapped = true)
    {
      for (nalt = 0; nalt < recog_data.n_alternatives; nalt++)
        {
          if (!TEST_BIT (preferred, nalt) || TEST_BIT (exact_alts, nalt))
            continue;

          const operand_alternative *op_alt
            = &recog_op_alt[nalt * recog_data.n_operands];
          int this_reject = 0;
          for (nop = 0; nop < recog_data.n_operands; nop++)
            {
              int c, len;

              this_reject += op_alt[nop].reject;

              rtx op = recog_data.operand[nop];
              p = op_alt[nop].constraint;
              if (*p == 0 || *p == ',')
                continue;

              bool win_p = false;
              do
                switch (c = *p, len = CONSTRAINT_LEN (c, p), c)
                  {
                  case '#':
                  case ',':
                    c = '\0';
                    /* FALLTHRU */
                  case '\0':
                    len = 0;
                    break;

                  case '%':
                    /* The commutative modifier is handled above.  */
                    break;

                  case '0': case '1': case '2': case '3': case '4':
                  case '5': case '6': case '7': case '8': case '9':
                    {
                      rtx other = recog_data.operand[c - '0'];
                      if (MEM_P (other)
                          ? rtx_equal_p (other, op)
                          : REG_P (op) || SUBREG_P (op))
                        goto op_success;
                      win_p = true;
                    }
                    break;

                  case 'g':
                    goto op_success;
                    break;

                  default:
                    {
                      enum constraint_num cn = lookup_constraint (p);
                      switch (get_constraint_type (cn))
                        {
                        case CT_REGISTER:
                          if (reg_class_for_constraint (cn) != NO_REGS)
                            {
                              if (REG_P (op) || SUBREG_P (op))
                                goto op_success;
                              win_p = true;
                            }
                          break;

                        case CT_CONST_INT:
                          if (CONST_INT_P (op)
                              && (insn_const_int_ok_for_constraint
                                  (INTVAL (op), cn)))
                            goto op_success;
                          break;

                        case CT_ADDRESS:
                          goto op_success;

                        case CT_MEMORY:
                        case CT_SPECIAL_MEMORY:
                          if (MEM_P (op))
                            goto op_success;
                          win_p = true;
                          break;

                        case CT_FIXED_FORM:
                          if (constraint_satisfied_p (op, cn))
                            goto op_success;
                          break;
                        }
                      break;
                    }
                  }
              while (p += len, c);
              if (!win_p)
                break;
              /* We can make the alternative match by spilling a register
                 to memory or loading something into a register.  Count a
                 cost of one reload (the equivalent of the '?' constraint).  */
              this_reject += 6;
            op_success:
              ;
            }

          if (nop >= recog_data.n_operands)
            {
              alts |= ALTERNATIVE_BIT (nalt);
              if (this_reject == 0)
                exact_alts |= ALTERNATIVE_BIT (nalt);
            }
        }
      if (commutative < 0)
        break;
      /* Swap back and forth to avoid changing recog_data.  */
      std::swap (recog_data.operand[commutative],
                 recog_data.operand[commutative + 1]);
      if (curr_swapped)
        break;
    }
  return exact_alts ? exact_alts : alts;
}
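
#if 0
/* Illustrative sketch only -- not part of IRA.  For a hypothetical
   operand with the two-alternative constraint "r,m" and a MEM
   operand, alternative 0 needs a reload while alternative 1 matches
   exactly, so only ALTERNATIVE_BIT (1) is returned.  Callers walk
   the mask like this.  */
static void
sketch_walk_alternatives (rtx_insn *insn)
{
  alternative_mask mask = ira_setup_alts (insn);
  for (int nalt = 0; nalt < recog_data.n_alternatives; nalt++)
    if (TEST_BIT (mask, nalt))
      ;  /* Alternative NALT is worth considering for INSN.  */
}
#endif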

/* Return the number of the output non-early-clobber operand that must
   be identical, in every considered alternative, to the operand
   numbered OP_NUM (or a negative value if there is no such operand).
   ALTS is the mask of alternatives that we should consider.  */
int
ira_get_dup_out_num (int op_num, alternative_mask alts)
{
  int curr_alt, c, original, dup;
  bool ignore_p, use_commut_op_p;
  const char *str;

  if (op_num < 0 || recog_data.n_alternatives == 0)
    return -1;
  /* We should find duplications only for input operands.  */
  if (recog_data.operand_type[op_num] != OP_IN)
    return -1;
  str = recog_data.constraints[op_num];
  use_commut_op_p = false;
  for (;;)
    {
      rtx op = recog_data.operand[op_num];

      for (curr_alt = 0, ignore_p = !TEST_BIT (alts, curr_alt),
           original = -1;;)
        {
          c = *str;
          if (c == '\0')
            break;
          if (c == '#')
            ignore_p = true;
          else if (c == ',')
            {
              curr_alt++;
              ignore_p = !TEST_BIT (alts, curr_alt);
            }
          else if (! ignore_p)
            switch (c)
              {
              case 'g':
                goto fail;
              default:
                {
                  enum constraint_num cn = lookup_constraint (str);
                  enum reg_class cl = reg_class_for_constraint (cn);
                  if (cl != NO_REGS
                      && !targetm.class_likely_spilled_p (cl))
                    goto fail;
                  if (constraint_satisfied_p (op, cn))
                    goto fail;
                  break;
                }

              case '0': case '1': case '2': case '3': case '4':
              case '5': case '6': case '7': case '8': case '9':
                if (original != -1 && original != c)
                  goto fail;
                original = c;
                break;
              }
          str += CONSTRAINT_LEN (c, str);
        }
      if (original == -1)
        goto fail;
      dup = original - '0';
      if (recog_data.operand_type[dup] == OP_OUT)
        return dup;
    fail:
      if (use_commut_op_p)
        break;
      use_commut_op_p = true;
      if (recog_data.constraints[op_num][0] == '%')
        str = recog_data.constraints[op_num + 1];
      else if (op_num > 0 && recog_data.constraints[op_num - 1][0] == '%')
        str = recog_data.constraints[op_num - 1];
      else
        break;
    }
  return -1;
}
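
#if 0
/* Illustrative sketch only -- not part of IRA.  For a hypothetical
   two-address pattern with constraints

     operand 0 (output): "=r"
     operand 1 (input):  "0"
     operand 2 (input):  "r"

   ira_get_dup_out_num (1, alts) returns 0, because input operand 1
   must be tied to output operand 0 in every considered alternative,
   while for operand 2 it returns -1.  */
#endif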

\f

/* Search forward to see if the source register of a copy insn dies
   before either it or the destination register is modified, but don't
   scan past the end of the basic block.  If so, we can replace the
   source with the destination and let the source die in the copy
   insn.

   This will reduce the number of registers live in that range and may
   enable coalescing of the destination and the source, thus often
   saving one register in addition to a register-register copy.  */

static void
decrease_live_ranges_number (void)
{
  basic_block bb;
  rtx_insn *insn;
  rtx set, src, dest, dest_death, note;
  rtx_insn *p, *q;
  int sregno, dregno;

  if (! flag_expensive_optimizations)
    return;

  if (ira_dump_file)
    fprintf (ira_dump_file, "Starting decreasing number of live ranges...\n");

  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      {
        set = single_set (insn);
        if (! set)
          continue;
        src = SET_SRC (set);
        dest = SET_DEST (set);
        if (! REG_P (src) || ! REG_P (dest)
            || find_reg_note (insn, REG_DEAD, src))
          continue;
        sregno = REGNO (src);
        dregno = REGNO (dest);

        /* We don't want to mess with hard regs if register classes
           are small.  */
        if (sregno == dregno
            || (targetm.small_register_classes_for_mode_p (GET_MODE (src))
                && (sregno < FIRST_PSEUDO_REGISTER
                    || dregno < FIRST_PSEUDO_REGISTER))
            /* We don't see all updates to SP if they are in an
               auto-inc memory reference, so we must disallow this
               optimization on them.  */
            || sregno == STACK_POINTER_REGNUM
            || dregno == STACK_POINTER_REGNUM)
          continue;

        dest_death = NULL_RTX;

        for (p = NEXT_INSN (insn); p; p = NEXT_INSN (p))
          {
            if (! INSN_P (p))
              continue;
            if (BLOCK_FOR_INSN (p) != bb)
              break;

            if (reg_set_p (src, p) || reg_set_p (dest, p)
                /* If SRC is an asm-declared register, it must not be
                   replaced in any asm.  Unfortunately, the REG_EXPR
                   tree for the asm variable may be absent in the SRC
                   rtx, so we can't check the actual register
                   declaration easily (the asm operand will have it,
                   though).  To avoid complicating the test for a rare
                   case, we just don't perform register replacement
                   for a hard reg mentioned in an asm.  */
                || (sregno < FIRST_PSEUDO_REGISTER
                    && asm_noperands (PATTERN (p)) >= 0
                    && reg_overlap_mentioned_p (src, PATTERN (p)))
                /* Don't change hard registers used by a call.  */
                || (CALL_P (p) && sregno < FIRST_PSEUDO_REGISTER
                    && find_reg_fusage (p, USE, src))
                /* Don't change a USE of a register.  */
                || (GET_CODE (PATTERN (p)) == USE
                    && reg_overlap_mentioned_p (src, XEXP (PATTERN (p), 0))))
              break;

            /* See if all of SRC dies in P.  This test is slightly
               more conservative than it needs to be.  */
            if ((note = find_regno_note (p, REG_DEAD, sregno))
                && GET_MODE (XEXP (note, 0)) == GET_MODE (src))
              {
                int failed = 0;

                /* We can do the optimization.  Scan forward from INSN
                   again, replacing regs as we go.  Set FAILED if a
                   replacement can't be done.  In that case, we can't
                   move the death note for SRC.  This should be
                   rare.  */

                /* Set to stop at next insn.  */
                for (q = next_real_insn (insn);
                     q != next_real_insn (p);
                     q = next_real_insn (q))
                  {
                    if (reg_overlap_mentioned_p (src, PATTERN (q)))
                      {
                        /* If SRC is a hard register, we might miss
                           some overlapping registers with
                           validate_replace_rtx, so we would have to
                           undo it.  We can't if DEST is present in
                           the insn, so fail in that combination of
                           cases.  */
                        if (sregno < FIRST_PSEUDO_REGISTER
                            && reg_mentioned_p (dest, PATTERN (q)))
                          failed = 1;

                        /* Attempt to replace all uses.  */
                        else if (!validate_replace_rtx (src, dest, q))
                          failed = 1;

                        /* If this succeeded, but some part of the
                           register is still present, undo the
                           replacement.  */
                        else if (sregno < FIRST_PSEUDO_REGISTER
                                 && reg_overlap_mentioned_p (src, PATTERN (q)))
                          {
                            validate_replace_rtx (dest, src, q);
                            failed = 1;
                          }
                      }

                    /* If DEST dies here, remove the death note and
                       save it for later.  Make sure ALL of DEST dies
                       here; again, this is overly conservative.  */
                    if (! dest_death
                        && (dest_death = find_regno_note (q, REG_DEAD, dregno)))
                      {
                        if (GET_MODE (XEXP (dest_death, 0)) == GET_MODE (dest))
                          remove_note (q, dest_death);
                        else
                          {
                            failed = 1;
                            dest_death = 0;
                          }
                      }
                  }

                if (! failed)
                  {
                    /* Move death note of SRC from P to INSN.  */
                    remove_note (p, note);
                    XEXP (note, 1) = REG_NOTES (insn);
                    REG_NOTES (insn) = note;
                  }

                /* DEST is also dead if INSN has a REG_UNUSED note for
                   DEST.  */
                if (! dest_death
                    && (dest_death
                        = find_regno_note (insn, REG_UNUSED, dregno)))
                  {
                    PUT_REG_NOTE_KIND (dest_death, REG_DEAD);
                    remove_note (insn, dest_death);
                  }

                /* Put death note of DEST on P if we saw it die.  */
                if (dest_death)
                  {
                    XEXP (dest_death, 1) = REG_NOTES (p);
                    REG_NOTES (p) = dest_death;
                  }
                break;
              }

            /* If SRC is a hard register which is set or killed in
               some other way, we can't do this optimization.  */
            else if (sregno < FIRST_PSEUDO_REGISTER && dead_or_set_p (p, src))
              break;
          }
      }
}
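
#if 0
/* Illustrative sketch only -- not part of IRA.  The transformation
   above rewrites a sequence such as

     r2 = r1;          (the copy insn)
     ... = use (r1);   (last use: r1 dies here, r2 untouched)

   into

     r2 = r1;          (r1 now dies in the copy itself)
     ... = use (r2);

   so the live ranges of r1 and r2 no longer overlap and the copy can
   later be removed by coalescing.  */
#endif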

\f

/* Return nonzero if REGNO is a particularly bad choice for reloading X.  */
static bool
ira_bad_reload_regno_1 (int regno, rtx x)
{
  int x_regno, n, i;
  ira_allocno_t a;
  enum reg_class pref;

  /* We only deal with pseudo regs.  */
  if (! x || GET_CODE (x) != REG)
    return false;

  x_regno = REGNO (x);
  if (x_regno < FIRST_PSEUDO_REGISTER)
    return false;

  /* If the pseudo prefers REGNO explicitly, then do not consider
     REGNO a bad spill choice.  */
  pref = reg_preferred_class (x_regno);
  if (reg_class_size[pref] == 1)
    return !TEST_HARD_REG_BIT (reg_class_contents[pref], regno);

  /* If the pseudo conflicts with REGNO, then we consider REGNO a
     poor choice for a reload regno.  */
  a = ira_regno_allocno_map[x_regno];
  n = ALLOCNO_NUM_OBJECTS (a);
  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);
      if (TEST_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno))
        return true;
    }
  return false;
}

/* Return nonzero if REGNO is a particularly bad choice for reloading
   IN or OUT.  */
bool
ira_bad_reload_regno (int regno, rtx in, rtx out)
{
  return (ira_bad_reload_regno_1 (regno, in)
          || ira_bad_reload_regno_1 (regno, out));
}

/* Add register clobbers from asm statements.  */
static void
compute_regs_asm_clobbered (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;
      FOR_BB_INSNS_REVERSE (bb, insn)
        {
          df_ref def;

          if (NONDEBUG_INSN_P (insn) && asm_noperands (PATTERN (insn)) >= 0)
            FOR_EACH_INSN_DEF (def, insn)
              {
                unsigned int dregno = DF_REF_REGNO (def);
                if (HARD_REGISTER_NUM_P (dregno))
                  add_to_hard_reg_set (&crtl->asm_clobbers,
                                       GET_MODE (DF_REF_REAL_REG (def)),
                                       dregno);
              }
        }
    }
}


/* Set up ELIMINABLE_REGSET, IRA_NO_ALLOC_REGS, and
   REGS_EVER_LIVE.  */
void
ira_setup_eliminable_regset (void)
{
  int i;
  static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;

  /* Setup is_leaf as frame_pointer_required may use it.  This function
     is called by sched_init before ira if scheduling is enabled.  */
  crtl->is_leaf = leaf_function_p ();

  /* FIXME: If EXIT_IGNORE_STACK is set, we will not save and restore
     sp for alloca.  So we can't eliminate the frame pointer in that
     case.  At some point, we should improve this by emitting the
     sp-adjusting insns for this case.  */
  frame_pointer_needed
    = (! flag_omit_frame_pointer
       || (cfun->calls_alloca && EXIT_IGNORE_STACK)
       /* We need the frame pointer to catch stack overflow exceptions if
          the stack pointer is moving (as for the alloca case just above).  */
       || (STACK_CHECK_MOVING_SP
           && flag_stack_check
           && flag_exceptions
           && cfun->can_throw_non_call_exceptions)
       || crtl->accesses_prior_frames
       || (SUPPORTS_STACK_ALIGNMENT && crtl->stack_realign_needed)
       || targetm.frame_pointer_required ());

  /* The chance that FRAME_POINTER_NEEDED is changed from inspecting
     RTL is very small.  So if we use frame pointer for RA and RTL
     actually prevents this, we will spill pseudos assigned to the
     frame pointer in LRA.  */

  if (frame_pointer_needed)
    df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM, true);

  COPY_HARD_REG_SET (ira_no_alloc_regs, no_unit_alloc_regs);
  CLEAR_HARD_REG_SET (eliminable_regset);

  compute_regs_asm_clobbered ();

  /* Build the regset of all eliminable registers and show we can't
     use those that we already know won't be eliminated.  */
  for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++)
    {
      bool cannot_elim
        = (! targetm.can_eliminate (eliminables[i].from, eliminables[i].to)
           || (eliminables[i].to == STACK_POINTER_REGNUM && frame_pointer_needed));

      if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, eliminables[i].from))
        {
          SET_HARD_REG_BIT (eliminable_regset, eliminables[i].from);

          if (cannot_elim)
            SET_HARD_REG_BIT (ira_no_alloc_regs, eliminables[i].from);
        }
      else if (cannot_elim)
        error ("%s cannot be used in %<asm%> here",
               reg_names[eliminables[i].from]);
      else
        df_set_regs_ever_live (eliminables[i].from, true);
    }
  if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
    {
      if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, HARD_FRAME_POINTER_REGNUM))
        {
          SET_HARD_REG_BIT (eliminable_regset, HARD_FRAME_POINTER_REGNUM);
          if (frame_pointer_needed)
            SET_HARD_REG_BIT (ira_no_alloc_regs, HARD_FRAME_POINTER_REGNUM);
        }
      else if (frame_pointer_needed)
        error ("%s cannot be used in %<asm%> here",
               reg_names[HARD_FRAME_POINTER_REGNUM]);
      else
        df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM, true);
    }
}
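
#if 0
/* Illustrative sketch only -- not part of IRA.  On a typical target,
   when frame_pointer_needed is false the (FRAME_POINTER_REGNUM ->
   STACK_POINTER_REGNUM) entry of ELIMINABLE_REGS puts the frame
   pointer into eliminable_regset but not into ira_no_alloc_regs, so
   the allocator may reuse it as an ordinary register; when the
   elimination is impossible, the register is also added to
   ira_no_alloc_regs and is never allocated.  */
#endif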

\f

/* Vector of substitutions of register numbers,
   used to map pseudo regs into hardware regs.
   This is set up as a result of register allocation.
   Element N is the hard reg assigned to pseudo reg N,
   or is -1 if no hard reg was assigned.
   If N is a hard reg number, element N is N.  */
short *reg_renumber;

/* Set up REG_RENUMBER and CALLER_SAVE_NEEDED (used by reload) from
   the allocation found by IRA.  */
static void
setup_reg_renumber (void)
{
  int regno, hard_regno;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  caller_save_needed = 0;
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ira_use_lra_p && ALLOCNO_CAP_MEMBER (a) != NULL)
        continue;
      /* There are no caps at this point.  */
      ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
      if (! ALLOCNO_ASSIGNED_P (a))
        /* It can happen if A is not referenced but partially anticipated
           somewhere in a region.  */
        ALLOCNO_ASSIGNED_P (a) = true;
      ira_free_allocno_updated_costs (a);
      hard_regno = ALLOCNO_HARD_REGNO (a);
      regno = ALLOCNO_REGNO (a);
      reg_renumber[regno] = (hard_regno < 0 ? -1 : hard_regno);
      if (hard_regno >= 0)
        {
          int i, nwords;
          enum reg_class pclass;
          ira_object_t obj;

          pclass = ira_pressure_class_translate[REGNO_REG_CLASS (hard_regno)];
          nwords = ALLOCNO_NUM_OBJECTS (a);
          for (i = 0; i < nwords; i++)
            {
              obj = ALLOCNO_OBJECT (a, i);
              IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
                                      reg_class_contents[pclass]);
            }
          if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
              && ira_hard_reg_set_intersection_p (hard_regno, ALLOCNO_MODE (a),
                                                  call_used_reg_set))
            {
              ira_assert (!optimize || flag_caller_saves
                          || (ALLOCNO_CALLS_CROSSED_NUM (a)
                              == ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
                          || regno >= ira_reg_equiv_len
                          || ira_equiv_no_lvalue_p (regno));
              caller_save_needed = 1;
            }
        }
    }
}

/* Set up allocno assignment flags for further allocation
   improvements.  */
static void
setup_allocno_assignment_flags (void)
{
  int hard_regno;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    {
      if (! ALLOCNO_ASSIGNED_P (a))
        /* It can happen if A is not referenced but partially anticipated
           somewhere in a region.  */
        ira_free_allocno_updated_costs (a);
      hard_regno = ALLOCNO_HARD_REGNO (a);
      /* Don't assign hard registers to allocnos which are the
         destination of a removed store at the end of a loop.  It
         makes no sense to keep the same value in different hard
         registers.  It is also impossible to assign hard registers
         correctly to such allocnos because the cost info and info
         about intersected calls are incorrect for them.  */
      ALLOCNO_ASSIGNED_P (a) = (hard_regno >= 0
                                || ALLOCNO_EMIT_DATA (a)->mem_optimized_dest_p
                                || (ALLOCNO_MEMORY_COST (a)
                                    - ALLOCNO_CLASS_COST (a)) < 0);
      ira_assert
        (hard_regno < 0
         || ira_hard_reg_in_set_p (hard_regno, ALLOCNO_MODE (a),
                                   reg_class_contents[ALLOCNO_CLASS (a)]));
    }
}

/* Evaluate overall allocation cost and the costs for using hard
   registers and memory for allocnos.  */
static void
calculate_allocation_cost (void)
{
  int hard_regno, cost;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
  FOR_EACH_ALLOCNO (a, ai)
    {
      hard_regno = ALLOCNO_HARD_REGNO (a);
      ira_assert (hard_regno < 0
                  || (ira_hard_reg_in_set_p
                      (hard_regno, ALLOCNO_MODE (a),
                       reg_class_contents[ALLOCNO_CLASS (a)])));
      if (hard_regno < 0)
        {
          cost = ALLOCNO_MEMORY_COST (a);
          ira_mem_cost += cost;
        }
      else if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
        {
          cost = (ALLOCNO_HARD_REG_COSTS (a)
                  [ira_class_hard_reg_index
                   [ALLOCNO_CLASS (a)][hard_regno]]);
          ira_reg_cost += cost;
        }
      else
        {
          cost = ALLOCNO_CLASS_COST (a);
          ira_reg_cost += cost;
        }
      ira_overall_cost += cost;
    }

  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file,
               "+++Costs: overall %" PRId64
               ", reg %" PRId64
               ", mem %" PRId64
               ", ld %" PRId64
               ", st %" PRId64
               ", move %" PRId64,
               ira_overall_cost, ira_reg_cost, ira_mem_cost,
               ira_load_cost, ira_store_cost, ira_shuffle_cost);
      fprintf (ira_dump_file, "\n+++ move loops %d, new jumps %d\n",
               ira_move_loops_num, ira_additional_jumps_num);
    }

}

#ifdef ENABLE_IRA_CHECKING
/* Check the correctness of the allocation.  We need this because of
   the complicated code that transforms a multi-region internal
   representation into a one-region representation.  */
static void
check_allocation (void)
{
  ira_allocno_t a;
  int hard_regno, nregs, conflict_nregs;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      int i;

      if (ALLOCNO_CAP_MEMBER (a) != NULL
          || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
        continue;
      nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (a));
      if (nregs == 1)
        /* We allocated a single hard register.  */
        n = 1;
      else if (n > 1)
        /* We allocated multiple hard registers, and we will test
           conflicts in a granularity of single hard regs.  */
        nregs = 1;

      for (i = 0; i < n; i++)
        {
          ira_object_t obj = ALLOCNO_OBJECT (a, i);
          ira_object_t conflict_obj;
          ira_object_conflict_iterator oci;
          int this_regno = hard_regno;
          if (n > 1)
            {
              if (REG_WORDS_BIG_ENDIAN)
                this_regno += n - i - 1;
              else
                this_regno += i;
            }
          FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
            {
              ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
              int conflict_hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
              if (conflict_hard_regno < 0)
                continue;

              conflict_nregs = hard_regno_nregs (conflict_hard_regno,
                                                 ALLOCNO_MODE (conflict_a));

              if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1
                  && conflict_nregs == ALLOCNO_NUM_OBJECTS (conflict_a))
                {
                  if (REG_WORDS_BIG_ENDIAN)
                    conflict_hard_regno += (ALLOCNO_NUM_OBJECTS (conflict_a)
                                            - OBJECT_SUBWORD (conflict_obj) - 1);
                  else
                    conflict_hard_regno += OBJECT_SUBWORD (conflict_obj);
                  conflict_nregs = 1;
                }

              if ((conflict_hard_regno <= this_regno
                   && this_regno < conflict_hard_regno + conflict_nregs)
                  || (this_regno <= conflict_hard_regno
                      && conflict_hard_regno < this_regno + nregs))
                {
                  fprintf (stderr, "bad allocation for %d and %d\n",
                           ALLOCNO_REGNO (a), ALLOCNO_REGNO (conflict_a));
                  gcc_unreachable ();
                }
            }
        }
    }
}
#endif

/* Allocate REG_EQUIV_INIT.  Set it up from IRA_REG_EQUIV, which
   should be already calculated.  */
static void
setup_reg_equiv_init (void)
{
  int i;
  int max_regno = max_reg_num ();

  for (i = 0; i < max_regno; i++)
    reg_equiv_init (i) = ira_reg_equiv[i].init_insns;
}

2588 | ||
2589 | /* Update the equivalence info for TO_REGNO after a move from | |
2590 | FROM_REGNO.  INSNS are the insns which were generated for the | |
2591 | move.  It is assumed that FROM_REGNO and TO_REGNO always have the | |
2592 | same value at the point of any move containing such registers. | |
2593 | This function is used to update equiv info for register shuffles | |
2594 | on region borders and for caller save/restore insns. */ | |
2595 | void | |
b32d5189 | 2596 | ira_update_equiv_info_by_shuffle_insn (int to_regno, int from_regno, rtx_insn *insns) |
55a2c322 | 2597 | { |
b32d5189 DM |
2598 | rtx_insn *insn; |
2599 | rtx x, note; | |
55a2c322 VM |
2600 | |
2601 | if (! ira_reg_equiv[from_regno].defined_p | |
2602 | && (! ira_reg_equiv[to_regno].defined_p | |
2603 | || ((x = ira_reg_equiv[to_regno].memory) != NULL_RTX | |
2604 | && ! MEM_READONLY_P (x)))) | |
5a107a0f | 2605 | return; |
55a2c322 VM |
2606 | insn = insns; |
2607 | if (NEXT_INSN (insn) != NULL_RTX) | |
2608 | { | |
2609 | if (! ira_reg_equiv[to_regno].defined_p) | |
2610 | { | |
2611 | ira_assert (ira_reg_equiv[to_regno].init_insns == NULL_RTX); | |
2612 | return; | |
2613 | } | |
2614 | ira_reg_equiv[to_regno].defined_p = false; | |
2615 | ira_reg_equiv[to_regno].memory | |
2616 | = ira_reg_equiv[to_regno].constant | |
2617 | = ira_reg_equiv[to_regno].invariant | |
0cc97fc5 | 2618 | = ira_reg_equiv[to_regno].init_insns = NULL; |
55a2c322 VM |
2619 | if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL) |
2620 | fprintf (ira_dump_file, | |
2621 | " Invalidating equiv info for reg %d\n", to_regno); | |
2622 | return; | |
2623 | } | |
2624 | /* It is possible that FROM_REGNO still has no equivalence because, | |
2625 | in shuffles to_regno<-from_regno and from_regno<-to_regno, the | |
2626 | second insn has not been processed yet. */ | |
2627 | if (ira_reg_equiv[from_regno].defined_p) | |
2628 | { | |
2629 | ira_reg_equiv[to_regno].defined_p = true; | |
2630 | if ((x = ira_reg_equiv[from_regno].memory) != NULL_RTX) | |
2631 | { | |
2632 | ira_assert (ira_reg_equiv[from_regno].invariant == NULL_RTX | |
2633 | && ira_reg_equiv[from_regno].constant == NULL_RTX); | |
2634 | ira_assert (ira_reg_equiv[to_regno].memory == NULL_RTX | |
2635 | || rtx_equal_p (ira_reg_equiv[to_regno].memory, x)); | |
2636 | ira_reg_equiv[to_regno].memory = x; | |
2637 | if (! MEM_READONLY_P (x)) | |
2638 | /* We don't add the insn to the init insn list because a memory | |
2639 | equivalence just says which memory it is better to use when | |
2640 | the pseudo is spilled. */ | |
2641 | return; | |
2642 | } | |
2643 | else if ((x = ira_reg_equiv[from_regno].constant) != NULL_RTX) | |
2644 | { | |
2645 | ira_assert (ira_reg_equiv[from_regno].invariant == NULL_RTX); | |
2646 | ira_assert (ira_reg_equiv[to_regno].constant == NULL_RTX | |
2647 | || rtx_equal_p (ira_reg_equiv[to_regno].constant, x)); | |
2648 | ira_reg_equiv[to_regno].constant = x; | |
2649 | } | |
2650 | else | |
2651 | { | |
2652 | x = ira_reg_equiv[from_regno].invariant; | |
2653 | ira_assert (x != NULL_RTX); | |
2654 | ira_assert (ira_reg_equiv[to_regno].invariant == NULL_RTX | |
2655 | || rtx_equal_p (ira_reg_equiv[to_regno].invariant, x)); | |
2656 | ira_reg_equiv[to_regno].invariant = x; | |
2657 | } | |
2658 | if (find_reg_note (insn, REG_EQUIV, x) == NULL_RTX) | |
2659 | { | |
2c797321 | 2660 | note = set_unique_reg_note (insn, REG_EQUIV, copy_rtx (x)); |
55a2c322 VM |
2661 | gcc_assert (note != NULL_RTX); |
2662 | if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL) | |
2663 | { | |
2664 | fprintf (ira_dump_file, | |
2665 | " Adding equiv note to insn %u for reg %d ", | |
2666 | INSN_UID (insn), to_regno); | |
cfbeaedf | 2667 | dump_value_slim (ira_dump_file, x, 1); |
55a2c322 VM |
2668 | fprintf (ira_dump_file, "\n"); |
2669 | } | |
2670 | } | |
2671 | } | |
2672 | ira_reg_equiv[to_regno].init_insns | |
2673 | = gen_rtx_INSN_LIST (VOIDmode, insn, | |
2674 | ira_reg_equiv[to_regno].init_insns); | |
2675 | if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL) | |
2676 | fprintf (ira_dump_file, | |
2677 | " Adding equiv init move insn %u to reg %d\n", | |
2678 | INSN_UID (insn), to_regno); | |
2679 | } | |
2680 | ||
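/* Editorial illustration (a standalone toy model, not GCC code): the
   three equivalence kinds handled above -- memory, constant, invariant
   -- are mutually exclusive, and a shuffle propagates exactly one of
   them from FROM_REGNO to TO_REGNO.  The struct below is an invented
   stand-in for struct ira_reg_equiv_s.  */

#include <assert.h>
#include <stddef.h>

struct toy_equiv
{
  int defined_p;
  const char *memory, *constant, *invariant;
};

static void
propagate_equiv (struct toy_equiv *to, const struct toy_equiv *from)
{
  if (!from->defined_p)
    return;                     /* Nothing known: leave TO alone.  */
  to->defined_p = 1;
  if (from->memory)
    to->memory = from->memory;
  else if (from->constant)
    to->constant = from->constant;
  else
    to->invariant = from->invariant;
}

int
main (void)
{
  struct toy_equiv from = { 1, NULL, "(const_int 42)", NULL };
  struct toy_equiv to = { 0, NULL, NULL, NULL };
  propagate_equiv (&to, &from);
  assert (to.defined_p && to.constant != NULL);
  return 0;
}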
058e97ec VM |
2681 | /* Fix values of array REG_EQUIV_INIT after live range splitting done |
2682 | by IRA. */ | |
2683 | static void | |
2684 | fix_reg_equiv_init (void) | |
2685 | { | |
70cc3288 | 2686 | int max_regno = max_reg_num (); |
f2034d06 | 2687 | int i, new_regno, max; |
618bccf9 TS |
2688 | rtx set; |
2689 | rtx_insn_list *x, *next, *prev; | |
2690 | rtx_insn *insn; | |
b8698a0f | 2691 | |
70cc3288 | 2692 | if (max_regno_before_ira < max_regno) |
058e97ec | 2693 | { |
9771b263 | 2694 | max = vec_safe_length (reg_equivs); |
f2034d06 JL |
2695 | grow_reg_equivs (); |
2696 | for (i = FIRST_PSEUDO_REGISTER; i < max; i++) | |
618bccf9 | 2697 | for (prev = NULL, x = reg_equiv_init (i); |
f2034d06 JL |
2698 | x != NULL_RTX; |
2699 | x = next) | |
058e97ec | 2700 | { |
618bccf9 TS |
2701 | next = x->next (); |
2702 | insn = x->insn (); | |
2703 | set = single_set (insn); | |
058e97ec VM |
2704 | ira_assert (set != NULL_RTX |
2705 | && (REG_P (SET_DEST (set)) || REG_P (SET_SRC (set)))); | |
2706 | if (REG_P (SET_DEST (set)) | |
2707 | && ((int) REGNO (SET_DEST (set)) == i | |
2708 | || (int) ORIGINAL_REGNO (SET_DEST (set)) == i)) | |
2709 | new_regno = REGNO (SET_DEST (set)); | |
2710 | else if (REG_P (SET_SRC (set)) | |
2711 | && ((int) REGNO (SET_SRC (set)) == i | |
2712 | || (int) ORIGINAL_REGNO (SET_SRC (set)) == i)) | |
2713 | new_regno = REGNO (SET_SRC (set)); | |
2714 | else | |
2715 | gcc_unreachable (); | |
2716 | if (new_regno == i) | |
2717 | prev = x; | |
2718 | else | |
2719 | { | |
55a2c322 | 2720 | /* Remove the wrong list element. */ |
058e97ec | 2721 | if (prev == NULL_RTX) |
f2034d06 | 2722 | reg_equiv_init (i) = next; |
058e97ec VM |
2723 | else |
2724 | XEXP (prev, 1) = next; | |
f2034d06 JL |
2725 | XEXP (x, 1) = reg_equiv_init (new_regno); |
2726 | reg_equiv_init (new_regno) = x; | |
058e97ec VM |
2727 | } |
2728 | } | |
2729 | } | |
2730 | } | |
2731 | ||
2732 | #ifdef ENABLE_IRA_CHECKING | |
2733 | /* Print redundant memory-memory copies. */ | |
2734 | static void | |
2735 | print_redundant_copies (void) | |
2736 | { | |
2737 | int hard_regno; | |
2738 | ira_allocno_t a; | |
2739 | ira_copy_t cp, next_cp; | |
2740 | ira_allocno_iterator ai; | |
b8698a0f | 2741 | |
058e97ec VM |
2742 | FOR_EACH_ALLOCNO (a, ai) |
2743 | { | |
2744 | if (ALLOCNO_CAP_MEMBER (a) != NULL) | |
2b9c63a2 | 2745 | /* It is a cap. */ |
058e97ec VM |
2746 | continue; |
2747 | hard_regno = ALLOCNO_HARD_REGNO (a); | |
2748 | if (hard_regno >= 0) | |
2749 | continue; | |
2750 | for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp) | |
2751 | if (cp->first == a) | |
2752 | next_cp = cp->next_first_allocno_copy; | |
2753 | else | |
2754 | { | |
2755 | next_cp = cp->next_second_allocno_copy; | |
2756 | if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL | |
2757 | && cp->insn != NULL_RTX | |
2758 | && ALLOCNO_HARD_REGNO (cp->first) == hard_regno) | |
2759 | fprintf (ira_dump_file, | |
2760 | " Redundant move from %d(freq %d):%d\n", | |
2761 | INSN_UID (cp->insn), cp->freq, hard_regno); | |
2762 | } | |
2763 | } | |
2764 | } | |
2765 | #endif | |
2766 | ||
2767 | /* Setup preferred and alternative classes for new pseudo-registers | |
2768 | created by IRA starting with START. */ | |
2769 | static void | |
2770 | setup_preferred_alternate_classes_for_new_pseudos (int start) | |
2771 | { | |
2772 | int i, old_regno; | |
2773 | int max_regno = max_reg_num (); | |
2774 | ||
2775 | for (i = start; i < max_regno; i++) | |
2776 | { | |
2777 | old_regno = ORIGINAL_REGNO (regno_reg_rtx[i]); | |
b8698a0f | 2778 | ira_assert (i != old_regno); |
058e97ec | 2779 | setup_reg_classes (i, reg_preferred_class (old_regno), |
ce18efcb | 2780 | reg_alternate_class (old_regno), |
1756cb66 | 2781 | reg_allocno_class (old_regno)); |
058e97ec VM |
2782 | if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL) |
2783 | fprintf (ira_dump_file, | |
2784 | " New r%d: setting preferred %s, alternative %s\n", | |
2785 | i, reg_class_names[reg_preferred_class (old_regno)], | |
2786 | reg_class_names[reg_alternate_class (old_regno)]); | |
2787 | } | |
2788 | } | |
2789 | ||
2790 | \f | |
df3e3493 | 2791 | /* The number of entries allocated in reg_info. */ |
fb99ee9b | 2792 | static int allocated_reg_info_size; |
058e97ec VM |
2793 | |
2794 | /* Regional allocation can create new pseudo-registers. This function | |
2795 | expands some arrays for pseudo-registers. */ | |
2796 | static void | |
fb99ee9b | 2797 | expand_reg_info (void) |
058e97ec VM |
2798 | { |
2799 | int i; | |
2800 | int size = max_reg_num (); | |
2801 | ||
2802 | resize_reg_info (); | |
fb99ee9b | 2803 | for (i = allocated_reg_info_size; i < size; i++) |
ce18efcb | 2804 | setup_reg_classes (i, GENERAL_REGS, ALL_REGS, GENERAL_REGS); |
fb99ee9b BS |
2805 | setup_preferred_alternate_classes_for_new_pseudos (allocated_reg_info_size); |
2806 | allocated_reg_info_size = size; | |
058e97ec VM |
2807 | } |
2808 | ||
3553f0bb VM |
2809 | /* Return TRUE if register pressure in the function is too high.  It | |
2810 | is used to decide when stack slot sharing is worth doing. */ | |
2811 | static bool | |
2812 | too_high_register_pressure_p (void) | |
2813 | { | |
2814 | int i; | |
1756cb66 | 2815 | enum reg_class pclass; |
b8698a0f | 2816 | |
1756cb66 | 2817 | for (i = 0; i < ira_pressure_classes_num; i++) |
3553f0bb | 2818 | { |
1756cb66 VM |
2819 | pclass = ira_pressure_classes[i]; |
2820 | if (ira_loop_tree_root->reg_pressure[pclass] > 10000) | |
3553f0bb VM |
2821 | return true; |
2822 | } | |
2823 | return false; | |
2824 | } | |
2825 | ||
058e97ec VM |
2826 | \f |
2827 | ||
2af2dbdc VM |
2828 | /* Indicate that hard register number FROM was eliminated and replaced with |
2829 | an offset from hard register number TO. The status of hard registers live | |
2830 | at the start of a basic block is updated by replacing a use of FROM with | |
2831 | a use of TO. */ | |
2832 | ||
2833 | void | |
2834 | mark_elimination (int from, int to) | |
2835 | { | |
2836 | basic_block bb; | |
bf744527 | 2837 | bitmap r; |
2af2dbdc | 2838 | |
11cd3bed | 2839 | FOR_EACH_BB_FN (bb, cfun) |
2af2dbdc | 2840 | { |
bf744527 SB |
2841 | r = DF_LR_IN (bb); |
2842 | if (bitmap_bit_p (r, from)) | |
2843 | { | |
2844 | bitmap_clear_bit (r, from); | |
2845 | bitmap_set_bit (r, to); | |
2846 | } | |
2847 | if (! df_live) | |
2848 | continue; | |
2849 | r = DF_LIVE_IN (bb); | |
2850 | if (bitmap_bit_p (r, from)) | |
2af2dbdc | 2851 | { |
bf744527 SB |
2852 | bitmap_clear_bit (r, from); |
2853 | bitmap_set_bit (r, to); | |
2af2dbdc VM |
2854 | } |
2855 | } | |
2856 | } | |
2857 | ||
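/* Editorial illustration (a standalone sketch, not the DF bitmap API):
   the per-block update in mark_elimination replaces a live occurrence
   of FROM with TO.  A 64-bit mask stands in for DF_LR_IN/DF_LIVE_IN.  */

#include <assert.h>

static unsigned long long
replace_live_reg (unsigned long long live, int from, int to)
{
  if (live & (1ULL << from))
    {
      live &= ~(1ULL << from);
      live |= 1ULL << to;
    }
  return live;
}

int
main (void)
{
  /* E.g. hard register 6 eliminated in favor of register 7; a set not
     containing bit 6 is left untouched.  */
  assert (replace_live_reg (1ULL << 6, 6, 7) == 1ULL << 7);
  assert (replace_live_reg (1ULL << 3, 6, 7) == 1ULL << 3);
  return 0;
}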
2858 | \f | |
2859 | ||
55a2c322 VM |
2860 | /* The length of the following array. */ |
2861 | int ira_reg_equiv_len; | |
2862 | ||
2863 | /* The equivalence info for each register. */ | |
4c2b2d79 | 2864 | struct ira_reg_equiv_s *ira_reg_equiv; |
55a2c322 VM |
2865 | |
2866 | /* Expand ira_reg_equiv if necessary. */ | |
2867 | void | |
2868 | ira_expand_reg_equiv (void) | |
2869 | { | |
2870 | int old = ira_reg_equiv_len; | |
2871 | ||
2872 | if (ira_reg_equiv_len > max_reg_num ()) | |
2873 | return; | |
2874 | ira_reg_equiv_len = max_reg_num () * 3 / 2 + 1; | |
2875 | ira_reg_equiv | |
4c2b2d79 | 2876 | = (struct ira_reg_equiv_s *) xrealloc (ira_reg_equiv, |
55a2c322 | 2877 | ira_reg_equiv_len |
4c2b2d79 | 2878 | * sizeof (struct ira_reg_equiv_s)); |
55a2c322 VM |
2879 | gcc_assert (old < ira_reg_equiv_len); |
2880 | memset (ira_reg_equiv + old, 0, | |
4c2b2d79 | 2881 | sizeof (struct ira_reg_equiv_s) * (ira_reg_equiv_len - old)); |
55a2c322 VM |
2882 | } |
2883 | ||
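/* Editorial illustration (a standalone sketch, not GCC code): the
   growth policy above reallocates to 1.5x the needed size plus one and
   zeroes only the new tail, so existing entries survive.  Plain
   realloc stands in for xrealloc; error handling is omitted.  */

#include <stdlib.h>
#include <string.h>

static int toy_len;
static int *toy_vec;

static void
toy_expand (int needed)
{
  int old = toy_len;

  if (toy_len > needed)
    return;
  toy_len = needed * 3 / 2 + 1;
  toy_vec = (int *) realloc (toy_vec, toy_len * sizeof (int));
  memset (toy_vec + old, 0, sizeof (int) * (toy_len - old));
}

int
main (void)
{
  toy_expand (10);   /* toy_len becomes 16, all entries zeroed.  */
  return toy_vec[15];
}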
2884 | static void | |
2885 | init_reg_equiv (void) | |
2886 | { | |
2887 | ira_reg_equiv_len = 0; | |
2888 | ira_reg_equiv = NULL; | |
2889 | ira_expand_reg_equiv (); | |
2890 | } | |
2891 | ||
2892 | static void | |
2893 | finish_reg_equiv (void) | |
2894 | { | |
2895 | free (ira_reg_equiv); | |
2896 | } | |
2897 | ||
2898 | \f | |
2899 | ||
2af2dbdc VM |
2900 | struct equivalence |
2901 | { | |
2af2dbdc VM |
2902 | /* Set when a REG_EQUIV note is found or created. Use to |
2903 | keep track of what memory accesses might be created later, | |
2904 | e.g. by reload. */ | |
2905 | rtx replacement; | |
2906 | rtx *src_p; | |
fb0ab697 JL |
2907 | |
2908 | /* The list of each instruction which initializes this register. | |
2909 | ||
2910 | NULL indicates we know nothing about this register's equivalence | |
2911 | properties. | |
2912 | ||
2913 | An INSN_LIST with a NULL insn indicates this pseudo is already | |
2914 | known to not have a valid equivalence. */ | |
2915 | rtx_insn_list *init_insns; | |
2916 | ||
2af2dbdc VM |
2917 | /* Loop depth is used to recognize equivalences which appear |
2918 | to be present within the same loop (or in an inner loop). */ | |
5ffa4e6a | 2919 | short loop_depth; |
2af2dbdc | 2920 | /* Nonzero if this had a preexisting REG_EQUIV note. */ |
5ffa4e6a | 2921 | unsigned char is_arg_equivalence : 1; |
8f5929e1 JJ |
2922 | /* Set when an attempt should be made to replace a register |
2923 | with the associated src_p entry. */ | |
5ffa4e6a FY |
2924 | unsigned char replace : 1; |
2925 | /* Set if this register has no known equivalence. */ | |
2926 | unsigned char no_equiv : 1; | |
8c1d8b59 AM |
2927 | /* Set if this register is mentioned in a paradoxical subreg. */ |
2928 | unsigned char pdx_subregs : 1; | |
2af2dbdc VM |
2929 | }; |
2930 | ||
2931 | /* reg_equiv[N] (where N is a pseudo reg number) is the equivalence | |
2932 | structure for that register. */ | |
2933 | static struct equivalence *reg_equiv; | |
2934 | ||
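/* Editorial illustration (a standalone toy, not the rtx_insn_list
   API): init_insns above is a tri-state.  NULL means nothing is known;
   a list whose first insn is NULL means the pseudo is known to have no
   valid equivalence; anything else lists the initializing insns.  */

#include <assert.h>
#include <stddef.h>

struct toy_insn_list
{
  const void *insn;            /* NULL marks "known no equivalence".  */
  struct toy_insn_list *next;
};

/* Decode the tri-state: -1 = unknown, 0 = known to have no valid
   equivalence, 1 = has candidate initializing insns.  */
static int
equiv_state (const struct toy_insn_list *init_insns)
{
  if (init_insns == NULL)
    return -1;
  return init_insns->insn != NULL;
}

int
main (void)
{
  int dummy;
  struct toy_insn_list no_equiv_marker = { NULL, NULL };
  struct toy_insn_list one_init = { &dummy, NULL };

  assert (equiv_state (NULL) == -1);
  assert (equiv_state (&no_equiv_marker) == 0);
  assert (equiv_state (&one_init) == 1);
  return 0;
}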
c7a99fc6 AM |
2935 | /* Used for communication between the following two functions. */ |
2936 | struct equiv_mem_data | |
2937 | { | |
2938 | /* A MEM that we wish to ensure remains unchanged. */ | |
2939 | rtx equiv_mem; | |
2af2dbdc | 2940 | |
c7a99fc6 AM |
2941 | /* Set true if EQUIV_MEM is modified. */ |
2942 | bool equiv_mem_modified; | |
2943 | }; | |
2af2dbdc VM |
2944 | |
2945 | /* If EQUIV_MEM is modified by modifying DEST, indicate that it is modified. | |
2946 | Called via note_stores. */ | |
2947 | static void | |
2948 | validate_equiv_mem_from_store (rtx dest, const_rtx set ATTRIBUTE_UNUSED, | |
c7a99fc6 | 2949 | void *data) |
2af2dbdc | 2950 | { |
c7a99fc6 AM |
2951 | struct equiv_mem_data *info = (struct equiv_mem_data *) data; |
2952 | ||
2af2dbdc | 2953 | if ((REG_P (dest) |
c7a99fc6 | 2954 | && reg_overlap_mentioned_p (dest, info->equiv_mem)) |
2af2dbdc | 2955 | || (MEM_P (dest) |
c7a99fc6 AM |
2956 | && anti_dependence (info->equiv_mem, dest))) |
2957 | info->equiv_mem_modified = true; | |
2af2dbdc VM |
2958 | } |
2959 | ||
63ce14e0 AM |
2960 | enum valid_equiv { valid_none, valid_combine, valid_reload }; |
2961 | ||
2af2dbdc VM |
2962 | /* Verify that no store between START and the death of REG invalidates |
2963 | MEMREF. MEMREF is invalidated by modifying a register used in MEMREF, | |
2964 | by storing into an overlapping memory location, or with a non-const | |
2965 | CALL_INSN. | |
2966 | ||
63ce14e0 AM |
2967 | Return VALID_RELOAD if MEMREF remains valid for both reload and |
2968 | combine_and_move insns, VALID_COMBINE if only valid for | |
2969 | combine_and_move_insns, and VALID_NONE otherwise. */ | |
2970 | static enum valid_equiv | |
b32d5189 | 2971 | validate_equiv_mem (rtx_insn *start, rtx reg, rtx memref) |
2af2dbdc | 2972 | { |
b32d5189 | 2973 | rtx_insn *insn; |
2af2dbdc | 2974 | rtx note; |
c7a99fc6 | 2975 | struct equiv_mem_data info = { memref, false }; |
63ce14e0 | 2976 | enum valid_equiv ret = valid_reload; |
2af2dbdc VM |
2977 | |
2978 | /* If the memory reference has side effects or is volatile, it isn't a | |
2979 | valid equivalence. */ | |
2980 | if (side_effects_p (memref)) | |
63ce14e0 | 2981 | return valid_none; |
2af2dbdc | 2982 | |
c7a99fc6 | 2983 | for (insn = start; insn; insn = NEXT_INSN (insn)) |
2af2dbdc | 2984 | { |
63ce14e0 | 2985 | if (!INSN_P (insn)) |
2af2dbdc VM |
2986 | continue; |
2987 | ||
2988 | if (find_reg_note (insn, REG_DEAD, reg)) | |
63ce14e0 | 2989 | return ret; |
2af2dbdc | 2990 | |
a22265a4 | 2991 | if (CALL_P (insn)) |
63ce14e0 AM |
2992 | { |
2993 | /* We can combine a reg def from one insn into a reg use in | |
2994 | another over a call if the memory is readonly or the call | |
2995 | const/pure. However, we can't set reg_equiv notes up for | |
2996 | reload over any call. The problem is the equivalent form | |
2997 | may reference a pseudo which gets assigned a call | |
2998 | clobbered hard reg. When we later replace REG with its | |
2999 | equivalent form, the value in the call-clobbered reg has | |
3000 | been changed and all hell breaks loose. */ | |
3001 | ret = valid_combine; | |
3002 | if (!MEM_READONLY_P (memref) | |
3003 | && !RTL_CONST_OR_PURE_CALL_P (insn)) | |
3004 | return valid_none; | |
3005 | } | |
2af2dbdc | 3006 | |
c7a99fc6 AM |
3007 | note_stores (PATTERN (insn), validate_equiv_mem_from_store, &info); |
3008 | if (info.equiv_mem_modified) | |
63ce14e0 | 3009 | return valid_none; |
2af2dbdc VM |
3010 | |
3011 | /* If a register mentioned in MEMREF is modified via an | |
3012 | auto-increment, we lose the equivalence. Do the same if one | |
3013 | dies; although we could extend the life, it doesn't seem worth | |
3014 | the trouble. */ | |
3015 | ||
3016 | for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) | |
3017 | if ((REG_NOTE_KIND (note) == REG_INC | |
3018 | || REG_NOTE_KIND (note) == REG_DEAD) | |
3019 | && REG_P (XEXP (note, 0)) | |
3020 | && reg_overlap_mentioned_p (XEXP (note, 0), memref)) | |
63ce14e0 | 3021 | return valid_none; |
2af2dbdc VM |
3022 | } |
3023 | ||
63ce14e0 | 3024 | return valid_none; |
2af2dbdc VM |
3025 | } |
3026 | ||
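/* Editorial illustration (a standalone model over an invented event
   stream, not real insns): the validity computed above only ever
   weakens as the scan proceeds -- it starts at valid_reload, drops to
   valid_combine at a call that is still safe for combining, and
   collapses to valid_none on any invalidating store or clobber.  */

#include <assert.h>

enum toy_valid { t_none, t_combine, t_reload };
enum toy_event { EV_PLAIN, EV_SAFE_CALL, EV_CLOBBER, EV_REG_DEAD };

static enum toy_valid
scan_validity (const enum toy_event *ev, int n)
{
  enum toy_valid ret = t_reload;
  int i;

  for (i = 0; i < n; i++)
    switch (ev[i])
      {
      case EV_SAFE_CALL:
        if (ret > t_combine)
          ret = t_combine;
        break;
      case EV_CLOBBER:
        return t_none;
      case EV_REG_DEAD:
        return ret;
      default:
        break;
      }
  return t_none;               /* No death seen: no valid equivalence.  */
}

int
main (void)
{
  enum toy_event a[] = { EV_PLAIN, EV_REG_DEAD };
  enum toy_event b[] = { EV_SAFE_CALL, EV_REG_DEAD };
  enum toy_event c[] = { EV_CLOBBER, EV_REG_DEAD };

  assert (scan_validity (a, 2) == t_reload);
  assert (scan_validity (b, 2) == t_combine);
  assert (scan_validity (c, 2) == t_none);
  return 0;
}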
3027 | /* Returns zero if X is known to be invariant. */ | |
3028 | static int | |
3029 | equiv_init_varies_p (rtx x) | |
3030 | { | |
3031 | RTX_CODE code = GET_CODE (x); | |
3032 | int i; | |
3033 | const char *fmt; | |
3034 | ||
3035 | switch (code) | |
3036 | { | |
3037 | case MEM: | |
3038 | return !MEM_READONLY_P (x) || equiv_init_varies_p (XEXP (x, 0)); | |
3039 | ||
3040 | case CONST: | |
d8116890 | 3041 | CASE_CONST_ANY: |
2af2dbdc VM |
3042 | case SYMBOL_REF: |
3043 | case LABEL_REF: | |
3044 | return 0; | |
3045 | ||
3046 | case REG: | |
3047 | return reg_equiv[REGNO (x)].replace == 0 && rtx_varies_p (x, 0); | |
3048 | ||
3049 | case ASM_OPERANDS: | |
3050 | if (MEM_VOLATILE_P (x)) | |
3051 | return 1; | |
3052 | ||
3053 | /* Fall through. */ | |
3054 | ||
3055 | default: | |
3056 | break; | |
3057 | } | |
3058 | ||
3059 | fmt = GET_RTX_FORMAT (code); | |
3060 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
3061 | if (fmt[i] == 'e') | |
3062 | { | |
3063 | if (equiv_init_varies_p (XEXP (x, i))) | |
3064 | return 1; | |
3065 | } | |
3066 | else if (fmt[i] == 'E') | |
3067 | { | |
3068 | int j; | |
3069 | for (j = 0; j < XVECLEN (x, i); j++) | |
3070 | if (equiv_init_varies_p (XVECEXP (x, i, j))) | |
3071 | return 1; | |
3072 | } | |
3073 | ||
3074 | return 0; | |
3075 | } | |
3076 | ||
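/* Editorial illustration (a self-contained imitation over an invented
   node type, not the rtl.h API): the tail of equiv_init_varies_p is
   the standard rtx walk -- GET_RTX_FORMAT yields a format string in
   which 'e' marks a sub-expression operand, and the walk recurses on
   exactly those operands.  */

#include <assert.h>
#include <stddef.h>

struct toy_rtx
{
  const char *fmt;             /* 'e' entries only, for brevity.  */
  struct toy_rtx *ops[2];
  int varies;                  /* Leaf property being searched for.  */
};

static int
toy_varies_p (const struct toy_rtx *x)
{
  int i;

  if (x->varies)
    return 1;
  for (i = 0; x->fmt[i] != '\0'; i++)
    if (x->fmt[i] == 'e' && toy_varies_p (x->ops[i]))
      return 1;
  return 0;
}

int
main (void)
{
  struct toy_rtx reg = { "", { NULL, NULL }, 1 };
  struct toy_rtx cst = { "", { NULL, NULL }, 0 };
  struct toy_rtx plus = { "ee", { &reg, &cst }, 0 };

  assert (toy_varies_p (&plus));
  assert (!toy_varies_p (&cst));
  return 0;
}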
3077 | /* Returns nonzero if X (used to initialize register REGNO) is movable. | |
3078 | X is only movable if the registers it uses have equivalent initializations | |
3079 | that appear to be within the same loop (or in an inner loop) and are | |
3080 | movable, or if they are not candidates for local_alloc and don't vary. */ | |
3081 | static int | |
3082 | equiv_init_movable_p (rtx x, int regno) | |
3083 | { | |
3084 | int i, j; | |
3085 | const char *fmt; | |
3086 | enum rtx_code code = GET_CODE (x); | |
3087 | ||
3088 | switch (code) | |
3089 | { | |
3090 | case SET: | |
3091 | return equiv_init_movable_p (SET_SRC (x), regno); | |
3092 | ||
3093 | case CC0: | |
3094 | case CLOBBER: | |
8df47bdf | 3095 | case CLOBBER_HIGH: |
2af2dbdc VM |
3096 | return 0; |
3097 | ||
3098 | case PRE_INC: | |
3099 | case PRE_DEC: | |
3100 | case POST_INC: | |
3101 | case POST_DEC: | |
3102 | case PRE_MODIFY: | |
3103 | case POST_MODIFY: | |
3104 | return 0; | |
3105 | ||
3106 | case REG: | |
1756cb66 VM |
3107 | return ((reg_equiv[REGNO (x)].loop_depth >= reg_equiv[regno].loop_depth |
3108 | && reg_equiv[REGNO (x)].replace) | |
3109 | || (REG_BASIC_BLOCK (REGNO (x)) < NUM_FIXED_BLOCKS | |
3110 | && ! rtx_varies_p (x, 0))); | |
2af2dbdc VM |
3111 | |
3112 | case UNSPEC_VOLATILE: | |
3113 | return 0; | |
3114 | ||
3115 | case ASM_OPERANDS: | |
3116 | if (MEM_VOLATILE_P (x)) | |
3117 | return 0; | |
3118 | ||
3119 | /* Fall through. */ | |
3120 | ||
3121 | default: | |
3122 | break; | |
3123 | } | |
3124 | ||
3125 | fmt = GET_RTX_FORMAT (code); | |
3126 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
3127 | switch (fmt[i]) | |
3128 | { | |
3129 | case 'e': | |
3130 | if (! equiv_init_movable_p (XEXP (x, i), regno)) | |
3131 | return 0; | |
3132 | break; | |
3133 | case 'E': | |
3134 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
3135 | if (! equiv_init_movable_p (XVECEXP (x, i, j), regno)) | |
3136 | return 0; | |
3137 | break; | |
3138 | } | |
3139 | ||
3140 | return 1; | |
3141 | } | |
3142 | ||
cc30d932 VM |
3143 | static bool memref_referenced_p (rtx memref, rtx x, bool read_p); |
3144 | ||
3145 | /* Auxiliary function for memref_referenced_p.  Process a store to X, | |
3146 | checking whether it references MEMREF. */ | |
3147 | static bool | |
3148 | process_set_for_memref_referenced_p (rtx memref, rtx x) | |
3149 | { | |
3150 | /* If we are setting a MEM, it doesn't count (its address does), but any | |
3151 | other SET_DEST that has a MEM in it is referencing the MEM. */ | |
3152 | if (MEM_P (x)) | |
3153 | { | |
3154 | if (memref_referenced_p (memref, XEXP (x, 0), true)) | |
3155 | return true; | |
3156 | } | |
3157 | else if (memref_referenced_p (memref, x, false)) | |
3158 | return true; | |
3159 | ||
3160 | return false; | |
3161 | } | |
3162 | ||
3163 | /* TRUE if X references a memory location (as a read if READ_P) that | |
3164 | would be affected by a store to MEMREF. */ | |
3165 | static bool | |
3166 | memref_referenced_p (rtx memref, rtx x, bool read_p) | |
2af2dbdc VM |
3167 | { |
3168 | int i, j; | |
3169 | const char *fmt; | |
3170 | enum rtx_code code = GET_CODE (x); | |
3171 | ||
3172 | switch (code) | |
3173 | { | |
2af2dbdc VM |
3174 | case CONST: |
3175 | case LABEL_REF: | |
3176 | case SYMBOL_REF: | |
d8116890 | 3177 | CASE_CONST_ANY: |
2af2dbdc VM |
3178 | case PC: |
3179 | case CC0: | |
3180 | case HIGH: | |
3181 | case LO_SUM: | |
cc30d932 | 3182 | return false; |
2af2dbdc VM |
3183 | |
3184 | case REG: | |
3185 | return (reg_equiv[REGNO (x)].replacement | |
3186 | && memref_referenced_p (memref, | |
cc30d932 | 3187 | reg_equiv[REGNO (x)].replacement, read_p)); |
2af2dbdc VM |
3188 | |
3189 | case MEM: | |
cc30d932 VM |
3190 | /* Memory X might have a different effective type than MEMREF. */ | |
3191 | if (read_p || true_dependence (memref, VOIDmode, x)) | |
3192 | return true; | |
2af2dbdc VM |
3193 | break; |
3194 | ||
3195 | case SET: | |
cc30d932 VM |
3196 | if (process_set_for_memref_referenced_p (memref, SET_DEST (x))) |
3197 | return true; | |
3198 | ||
3199 | return memref_referenced_p (memref, SET_SRC (x), true); | |
3200 | ||
3201 | case CLOBBER: | |
3202 | case CLOBBER_HIGH: | |
3203 | if (process_set_for_memref_referenced_p (memref, XEXP (x, 0))) | |
3204 | return true; | |
2af2dbdc | 3205 | |
cc30d932 VM |
3206 | return false; |
3207 | ||
3208 | case PRE_DEC: | |
3209 | case POST_DEC: | |
3210 | case PRE_INC: | |
3211 | case POST_INC: | |
3212 | if (process_set_for_memref_referenced_p (memref, XEXP (x, 0))) | |
3213 | return true; | |
3214 | ||
3215 | return memref_referenced_p (memref, XEXP (x, 0), true); | |
3216 | ||
3217 | case POST_MODIFY: | |
3218 | case PRE_MODIFY: | |
3219 | /* op0 = op0 + op1 */ | |
3220 | if (process_set_for_memref_referenced_p (memref, XEXP (x, 0))) | |
3221 | return true; | |
3222 | ||
3223 | if (memref_referenced_p (memref, XEXP (x, 0), true)) | |
3224 | return true; | |
3225 | ||
3226 | return memref_referenced_p (memref, XEXP (x, 1), true); | |
2af2dbdc VM |
3227 | |
3228 | default: | |
3229 | break; | |
3230 | } | |
3231 | ||
3232 | fmt = GET_RTX_FORMAT (code); | |
3233 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
3234 | switch (fmt[i]) | |
3235 | { | |
3236 | case 'e': | |
cc30d932 VM |
3237 | if (memref_referenced_p (memref, XEXP (x, i), read_p)) |
3238 | return true; | |
2af2dbdc VM |
3239 | break; |
3240 | case 'E': | |
3241 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
cc30d932 VM |
3242 | if (memref_referenced_p (memref, XVECEXP (x, i, j), read_p)) |
3243 | return true; | |
2af2dbdc VM |
3244 | break; |
3245 | } | |
3246 | ||
cc30d932 | 3247 | return false; |
2af2dbdc VM |
3248 | } |
3249 | ||
3250 | /* TRUE if some insn in the range (START, END] references a memory location | |
14d7d4be JL |
3251 | that would be affected by a store to MEMREF. |
3252 | ||
3253 | Callers should not call this routine if START is after END in the | |
3254 | RTL chain. */ | |
3255 | ||
2af2dbdc | 3256 | static int |
b32d5189 | 3257 | memref_used_between_p (rtx memref, rtx_insn *start, rtx_insn *end) |
2af2dbdc | 3258 | { |
b32d5189 | 3259 | rtx_insn *insn; |
2af2dbdc | 3260 | |
14d7d4be JL |
3261 | for (insn = NEXT_INSN (start); |
3262 | insn && insn != NEXT_INSN (end); | |
2af2dbdc VM |
3263 | insn = NEXT_INSN (insn)) |
3264 | { | |
b5b8b0ac | 3265 | if (!NONDEBUG_INSN_P (insn)) |
2af2dbdc | 3266 | continue; |
b8698a0f | 3267 | |
cc30d932 | 3268 | if (memref_referenced_p (memref, PATTERN (insn), false)) |
2af2dbdc VM |
3269 | return 1; |
3270 | ||
3271 | /* Nonconst functions may access memory. */ | |
3272 | if (CALL_P (insn) && (! RTL_CONST_CALL_P (insn))) | |
3273 | return 1; | |
3274 | } | |
3275 | ||
14d7d4be | 3276 | gcc_assert (insn == NEXT_INSN (end)); |
2af2dbdc VM |
3277 | return 0; |
3278 | } | |
3279 | ||
3280 | /* Mark REG as having no known equivalence. | |
3281 | Some instructions might have been processed before and furnished | |
3282 | with REG_EQUIV notes for this register; these notes will have to be | |
3283 | removed. | |
3284 | STORE is the piece of RTL that does the non-constant / conflicting | |
3285 | assignment - a SET, CLOBBER or REG_INC note. It is currently not used, | |
3286 | but needs to be there because this function is called from note_stores. */ | |
3287 | static void | |
1756cb66 VM |
3288 | no_equiv (rtx reg, const_rtx store ATTRIBUTE_UNUSED, |
3289 | void *data ATTRIBUTE_UNUSED) | |
2af2dbdc VM |
3290 | { |
3291 | int regno; | |
fb0ab697 | 3292 | rtx_insn_list *list; |
2af2dbdc VM |
3293 | |
3294 | if (!REG_P (reg)) | |
3295 | return; | |
3296 | regno = REGNO (reg); | |
5ffa4e6a | 3297 | reg_equiv[regno].no_equiv = 1; |
2af2dbdc | 3298 | list = reg_equiv[regno].init_insns; |
fb0ab697 | 3299 | if (list && list->insn () == NULL) |
2af2dbdc | 3300 | return; |
fb0ab697 | 3301 | reg_equiv[regno].init_insns = gen_rtx_INSN_LIST (VOIDmode, NULL_RTX, NULL); |
2af2dbdc VM |
3302 | reg_equiv[regno].replacement = NULL_RTX; |
3303 | /* This doesn't matter for equivalences made for argument registers, we | |
3304 | should keep their initialization insns. */ | |
3305 | if (reg_equiv[regno].is_arg_equivalence) | |
3306 | return; | |
55a2c322 | 3307 | ira_reg_equiv[regno].defined_p = false; |
0cc97fc5 | 3308 | ira_reg_equiv[regno].init_insns = NULL; |
fb0ab697 | 3309 | for (; list; list = list->next ()) |
2af2dbdc | 3310 | { |
fb0ab697 | 3311 | rtx_insn *insn = list->insn (); |
2af2dbdc VM |
3312 | remove_note (insn, find_reg_note (insn, REG_EQUIV, NULL_RTX)); |
3313 | } | |
3314 | } | |
3315 | ||
e3f9e0ac WM |
3316 | /* Scan INSN for paradoxical subregs and set the pdx_subregs flag in | |
3317 | reg_equiv for each register so used. */ | |
3318 | ||
40954ce5 | 3319 | static void |
8c1d8b59 | 3320 | set_paradoxical_subreg (rtx_insn *insn) |
e3f9e0ac | 3321 | { |
40954ce5 RS |
3322 | subrtx_iterator::array_type array; |
3323 | FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST) | |
3324 | { | |
3325 | const_rtx subreg = *iter; | |
3326 | if (GET_CODE (subreg) == SUBREG) | |
3327 | { | |
3328 | const_rtx reg = SUBREG_REG (subreg); | |
3329 | if (REG_P (reg) && paradoxical_subreg_p (subreg)) | |
8c1d8b59 | 3330 | reg_equiv[REGNO (reg)].pdx_subregs = true; |
40954ce5 RS |
3331 | } |
3332 | } | |
e3f9e0ac WM |
3333 | } |
3334 | ||
3a6191b1 JJ |
3335 | /* In a DEBUG_INSN location, replace REGs that are in the CLEARED_REGS | |
3336 | bitmap with their equivalent replacement. */ | |
3337 | ||
3338 | static rtx | |
3339 | adjust_cleared_regs (rtx loc, const_rtx old_rtx ATTRIBUTE_UNUSED, void *data) | |
3340 | { | |
3341 | if (REG_P (loc)) | |
3342 | { | |
3343 | bitmap cleared_regs = (bitmap) data; | |
3344 | if (bitmap_bit_p (cleared_regs, REGNO (loc))) | |
b8f045e2 | 3345 | return simplify_replace_fn_rtx (copy_rtx (*reg_equiv[REGNO (loc)].src_p), |
3a6191b1 JJ |
3346 | NULL_RTX, adjust_cleared_regs, data); |
3347 | } | |
3348 | return NULL_RTX; | |
3349 | } | |
3350 | ||
a72b242e AM |
3351 | /* Given register REGNO is set only once, return true if the defining |
3352 | insn dominates all uses. */ | |
3353 | ||
3354 | static bool | |
3355 | def_dominates_uses (int regno) | |
3356 | { | |
3357 | df_ref def = DF_REG_DEF_CHAIN (regno); | |
3358 | ||
3359 | struct df_insn_info *def_info = DF_REF_INSN_INFO (def); | |
3360 | /* If this is an artificial def (eh handler regs, hard frame pointer | |
3361 | for non-local goto, regs defined on function entry) then def_info | |
3362 | is NULL and the reg is always live before any use. We might | |
3363 | reasonably return true in that case, but since the only call | |
3364 | of this function is currently here in ira.c when we are looking | |
3365 | at a defining insn we can't have an artificial def as that would | |
3366 | bump DF_REG_DEF_COUNT. */ | |
3367 | gcc_assert (DF_REG_DEF_COUNT (regno) == 1 && def_info != NULL); | |
3368 | ||
3369 | rtx_insn *def_insn = DF_REF_INSN (def); | |
3370 | basic_block def_bb = BLOCK_FOR_INSN (def_insn); | |
3371 | ||
3372 | for (df_ref use = DF_REG_USE_CHAIN (regno); | |
3373 | use; | |
3374 | use = DF_REF_NEXT_REG (use)) | |
3375 | { | |
3376 | struct df_insn_info *use_info = DF_REF_INSN_INFO (use); | |
3377 | /* Only check real uses, not artificial ones. */ | |
3378 | if (use_info) | |
3379 | { | |
3380 | rtx_insn *use_insn = DF_REF_INSN (use); | |
3381 | if (!DEBUG_INSN_P (use_insn)) | |
3382 | { | |
3383 | basic_block use_bb = BLOCK_FOR_INSN (use_insn); | |
3384 | if (use_bb != def_bb | |
3385 | ? !dominated_by_p (CDI_DOMINATORS, use_bb, def_bb) | |
3386 | : DF_INSN_INFO_LUID (use_info) < DF_INSN_INFO_LUID (def_info)) | |
3387 | return false; | |
3388 | } | |
3389 | } | |
3390 | } | |
3391 | return true; | |
3392 | } | |
3393 | ||
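/* Editorial illustration (a standalone sketch; the callback is an
   invented stand-in for dominated_by_p): the check above reduces to
   two cases -- a use in the def's own block is ordered by luid (insn
   position), a use elsewhere by a dominator-tree query.  */

#include <assert.h>

static int
def_dominates_use_p (int def_bb, int def_luid, int use_bb, int use_luid,
                     int (*dominated_by_p) (int bb, int dom))
{
  if (use_bb != def_bb)
    return dominated_by_p (use_bb, def_bb);
  return def_luid <= use_luid;
}

/* Trivial dominator relation for the test: block 1 dominates both
   itself and block 2.  */
static int
toy_dom (int bb, int dom)
{
  return dom == 1 && (bb == 1 || bb == 2);
}

int
main (void)
{
  assert (def_dominates_use_p (1, 5, 2, 0, toy_dom));   /* cross-block */
  assert (def_dominates_use_p (1, 5, 1, 7, toy_dom));   /* same block  */
  assert (!def_dominates_use_p (1, 5, 1, 3, toy_dom));  /* use first   */
  return 0;
}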
2af2dbdc | 3394 | /* Find registers that are equivalent to a single value throughout the |
1756cb66 VM |
3395 | compilation (either because they can be referenced in memory or are |
3396 | set once from a single constant).  Lower their priority for | |
3397 | getting a hard register. | |
2af2dbdc | 3398 | |
1756cb66 VM |
3399 | If such a register is only referenced once, try substituting its |
3400 | value into the using insn. If it succeeds, we can eliminate the | |
3401 | register completely. | |
2af2dbdc | 3402 | |
ba52669f AM |
3403 | Initialize init_insns in ira_reg_equiv array. */ |
3404 | static void | |
2af2dbdc VM |
3405 | update_equiv_regs (void) |
3406 | { | |
b2908ba6 | 3407 | rtx_insn *insn; |
2af2dbdc | 3408 | basic_block bb; |
2af2dbdc | 3409 | |
8c1d8b59 AM |
3410 | /* Scan insns and set pdx_subregs for each reg used in a | |
3411 | paradoxical subreg.  Don't make such a reg equivalent to a mem: | |
e3f9e0ac WM |
3412 | LRA will not substitute such an equivalent memory, to prevent | |
3413 | accesses beyond the allocated memory of a paradoxical subreg. */ | |
11cd3bed | 3414 | FOR_EACH_BB_FN (bb, cfun) |
e3f9e0ac | 3415 | FOR_BB_INSNS (bb, insn) |
c34c46dd | 3416 | if (NONDEBUG_INSN_P (insn)) |
8c1d8b59 | 3417 | set_paradoxical_subreg (insn); |
e3f9e0ac | 3418 | |
2af2dbdc VM |
3419 | /* Scan the insns and find which registers have equivalences. Do this |
3420 | in a separate scan of the insns because (due to -fcse-follow-jumps) | |
3421 | a register can be set below its use. */ | |
91dabbb2 | 3422 | bitmap setjmp_crosses = regstat_get_setjmp_crosses (); |
11cd3bed | 3423 | FOR_EACH_BB_FN (bb, cfun) |
2af2dbdc | 3424 | { |
91dabbb2 | 3425 | int loop_depth = bb_loop_depth (bb); |
2af2dbdc VM |
3426 | |
3427 | for (insn = BB_HEAD (bb); | |
3428 | insn != NEXT_INSN (BB_END (bb)); | |
3429 | insn = NEXT_INSN (insn)) | |
3430 | { | |
3431 | rtx note; | |
3432 | rtx set; | |
3433 | rtx dest, src; | |
3434 | int regno; | |
3435 | ||
3436 | if (! INSN_P (insn)) | |
3437 | continue; | |
3438 | ||
3439 | for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) | |
3440 | if (REG_NOTE_KIND (note) == REG_INC) | |
3441 | no_equiv (XEXP (note, 0), note, NULL); | |
3442 | ||
3443 | set = single_set (insn); | |
3444 | ||
3445 | /* If this insn contains more (or less) than a single SET, | |
3446 | only mark all destinations as having no known equivalence. */ | |
07b38331 BS |
3447 | if (set == NULL_RTX |
3448 | || side_effects_p (SET_SRC (set))) | |
2af2dbdc VM |
3449 | { |
3450 | note_stores (PATTERN (insn), no_equiv, NULL); | |
3451 | continue; | |
3452 | } | |
3453 | else if (GET_CODE (PATTERN (insn)) == PARALLEL) | |
3454 | { | |
3455 | int i; | |
3456 | ||
3457 | for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--) | |
3458 | { | |
3459 | rtx part = XVECEXP (PATTERN (insn), 0, i); | |
3460 | if (part != set) | |
3461 | note_stores (part, no_equiv, NULL); | |
3462 | } | |
3463 | } | |
3464 | ||
3465 | dest = SET_DEST (set); | |
3466 | src = SET_SRC (set); | |
3467 | ||
3468 | /* See if this is setting up the equivalence between an argument | |
3469 | register and its stack slot. */ | |
3470 | note = find_reg_note (insn, REG_EQUIV, NULL_RTX); | |
3471 | if (note) | |
3472 | { | |
3473 | gcc_assert (REG_P (dest)); | |
3474 | regno = REGNO (dest); | |
3475 | ||
55a2c322 VM |
3476 | /* Note that we don't want to clear init_insns in |
3477 | ira_reg_equiv even if there are multiple sets of this | |
3478 | register. */ | |
2af2dbdc VM |
3479 | reg_equiv[regno].is_arg_equivalence = 1; |
3480 | ||
5a107a0f VM |
3481 | /* The insn result can have an equivalent memory location even | |
3482 | though the equivalence is not set up by the insn itself.  We | |
3483 | add this insn to the init insns for now, as a flag that regno | |
3484 | has an equivalence.  We will remove the insn from the init | |
3485 | insn list later. */ | |
3486 | if (rtx_equal_p (src, XEXP (note, 0)) || MEM_P (XEXP (note, 0))) | |
55a2c322 VM |
3487 | ira_reg_equiv[regno].init_insns |
3488 | = gen_rtx_INSN_LIST (VOIDmode, insn, | |
3489 | ira_reg_equiv[regno].init_insns); | |
2af2dbdc VM |
3490 | |
3491 | /* Continue normally in case this is a candidate for | |
3492 | replacements. */ | |
3493 | } | |
3494 | ||
3495 | if (!optimize) | |
3496 | continue; | |
3497 | ||
3498 | /* We only handle the case of a pseudo register being set | |
3499 | once, or always to the same value. */ | |
1fe28116 VM |
3500 | /* ??? The mn10200 port breaks if we add equivalences for |
3501 | values that need an ADDRESS_REGS register and set them equivalent | |
3502 | to a MEM of a pseudo. The actual problem is in the over-conservative | |
3503 | handling of INPADDR_ADDRESS / INPUT_ADDRESS / INPUT triples in | |
3504 | calculate_needs, but we traditionally work around this problem | |
3505 | here by rejecting equivalences when the destination is in a register | |
3506 | that's likely spilled. This is fragile, of course, since the | |
3507 | preferred class of a pseudo depends on all instructions that set | |
3508 | or use it. */ | |
3509 | ||
2af2dbdc VM |
3510 | if (!REG_P (dest) |
3511 | || (regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER | |
fb0ab697 JL |
3512 | || (reg_equiv[regno].init_insns |
3513 | && reg_equiv[regno].init_insns->insn () == NULL) | |
07b8f0a8 | 3514 | || (targetm.class_likely_spilled_p (reg_preferred_class (regno)) |
1fe28116 | 3515 | && MEM_P (src) && ! reg_equiv[regno].is_arg_equivalence)) |
2af2dbdc VM |
3516 | { |
3517 | /* This might be setting a SUBREG of a pseudo, a pseudo that is | |
3518 | also set somewhere else to a constant. */ | |
3519 | note_stores (set, no_equiv, NULL); | |
3520 | continue; | |
3521 | } | |
3522 | ||
8c1d8b59 AM |
3523 | /* Don't set reg mentioned in a paradoxical subreg |
3524 | equivalent to a mem. */ | |
3525 | if (MEM_P (src) && reg_equiv[regno].pdx_subregs) | |
e3f9e0ac WM |
3526 | { |
3527 | note_stores (set, no_equiv, NULL); | |
3528 | continue; | |
3529 | } | |
3530 | ||
2af2dbdc VM |
3531 | note = find_reg_note (insn, REG_EQUAL, NULL_RTX); |
3532 | ||
3533 | /* cse sometimes generates function invariants, but doesn't put a | |
3534 | REG_EQUAL note on the insn. Since this note would be redundant, | |
3535 | there's no point creating it earlier than here. */ | |
3536 | if (! note && ! rtx_varies_p (src, 0)) | |
3537 | note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src)); | |
3538 | ||
3539 | /* Don't bother considering a REG_EQUAL note containing an EXPR_LIST | |
2b9c63a2 | 3540 | since it represents a function call. */ |
2af2dbdc VM |
3541 | if (note && GET_CODE (XEXP (note, 0)) == EXPR_LIST) |
3542 | note = NULL_RTX; | |
3543 | ||
5ffa4e6a FY |
3544 | if (DF_REG_DEF_COUNT (regno) != 1) |
3545 | { | |
3546 | bool equal_p = true; | |
3547 | rtx_insn_list *list; | |
3548 | ||
3549 | /* If we have already processed this pseudo and determined it | |
67914693 | 3550 | cannot have an equivalence, then honor that decision. */ |
5ffa4e6a FY |
3551 | if (reg_equiv[regno].no_equiv) |
3552 | continue; | |
3553 | ||
3554 | if (! note | |
2af2dbdc VM |
3555 | || rtx_varies_p (XEXP (note, 0), 0) |
3556 | || (reg_equiv[regno].replacement | |
3557 | && ! rtx_equal_p (XEXP (note, 0), | |
5ffa4e6a FY |
3558 | reg_equiv[regno].replacement))) |
3559 | { | |
3560 | no_equiv (dest, set, NULL); | |
3561 | continue; | |
3562 | } | |
3563 | ||
3564 | list = reg_equiv[regno].init_insns; | |
3565 | for (; list; list = list->next ()) | |
3566 | { | |
3567 | rtx note_tmp; | |
3568 | rtx_insn *insn_tmp; | |
3569 | ||
3570 | insn_tmp = list->insn (); | |
3571 | note_tmp = find_reg_note (insn_tmp, REG_EQUAL, NULL_RTX); | |
3572 | gcc_assert (note_tmp); | |
3573 | if (! rtx_equal_p (XEXP (note, 0), XEXP (note_tmp, 0))) | |
3574 | { | |
3575 | equal_p = false; | |
3576 | break; | |
3577 | } | |
3578 | } | |
3579 | ||
3580 | if (! equal_p) | |
3581 | { | |
3582 | no_equiv (dest, set, NULL); | |
3583 | continue; | |
3584 | } | |
2af2dbdc | 3585 | } |
5ffa4e6a | 3586 | |
2af2dbdc VM |
3587 | /* Record this insn as initializing this register. */ |
3588 | reg_equiv[regno].init_insns | |
3589 | = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv[regno].init_insns); | |
3590 | ||
3591 | /* If this register is known to be equal to a constant, record that | |
a72b242e AM |
3592 | it is always equivalent to the constant. |
3593 | Note that it is possible to have a register use before | |
3594 | the def in loops (see gcc.c-torture/execute/pr79286.c) | |
3595 | where the reg is undefined on first use. If the def insn | |
3596 | won't trap we can use it as an equivalence, effectively | |
3597 | choosing the "undefined" value for the reg to be the | |
3598 | same as the value set by the def. */ | |
2af2dbdc | 3599 | if (DF_REG_DEF_COUNT (regno) == 1 |
a72b242e AM |
3600 | && note |
3601 | && !rtx_varies_p (XEXP (note, 0), 0) | |
08f42414 BE |
3602 | && (!may_trap_or_fault_p (XEXP (note, 0)) |
3603 | || def_dominates_uses (regno))) | |
2af2dbdc VM |
3604 | { |
3605 | rtx note_value = XEXP (note, 0); | |
3606 | remove_note (insn, note); | |
3607 | set_unique_reg_note (insn, REG_EQUIV, note_value); | |
3608 | } | |
3609 | ||
3610 | /* If this insn introduces a "constant" register, decrease the priority | |
3611 | of that register. Record this insn if the register is only used once | |
3612 | more and the equivalence value is the same as our source. | |
3613 | ||
3614 | The latter condition is checked for two reasons: First, it is an | |
3615 | indication that it may be more efficient to actually emit the insn | |
3616 | as written (if no registers are available, reload will substitute | |
3617 | the equivalence). Secondly, it avoids problems with any registers | |
3618 | dying in this insn whose death notes would be missed. | |
3619 | ||
3620 | If we don't have a REG_EQUIV note, see if this insn is loading | |
3621 | a register used only in one basic block from a MEM. If so, and the | |
3622 | MEM remains unchanged for the life of the register, add a REG_EQUIV | |
3623 | note. */ | |
2af2dbdc VM |
3624 | note = find_reg_note (insn, REG_EQUIV, NULL_RTX); |
3625 | ||
63ce14e0 | 3626 | rtx replacement = NULL_RTX; |
2af2dbdc | 3627 | if (note) |
63ce14e0 AM |
3628 | replacement = XEXP (note, 0); |
3629 | else if (REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS | |
3630 | && MEM_P (SET_SRC (set))) | |
2af2dbdc | 3631 | { |
63ce14e0 AM |
3632 | enum valid_equiv validity; |
3633 | validity = validate_equiv_mem (insn, dest, SET_SRC (set)); | |
3634 | if (validity != valid_none) | |
3635 | { | |
3636 | replacement = copy_rtx (SET_SRC (set)); | |
3637 | if (validity == valid_reload) | |
3638 | note = set_unique_reg_note (insn, REG_EQUIV, replacement); | |
3639 | } | |
3640 | } | |
2af2dbdc | 3641 | |
63ce14e0 AM |
3642 | /* If we haven't done so, record for reload that this is an |
3643 | equivalencing insn. */ | |
3644 | if (note && !reg_equiv[regno].is_arg_equivalence) | |
3645 | ira_reg_equiv[regno].init_insns | |
3646 | = gen_rtx_INSN_LIST (VOIDmode, insn, | |
3647 | ira_reg_equiv[regno].init_insns); | |
2af2dbdc | 3648 | |
63ce14e0 AM |
3649 | if (replacement) |
3650 | { | |
3651 | reg_equiv[regno].replacement = replacement; | |
2af2dbdc | 3652 | reg_equiv[regno].src_p = &SET_SRC (set); |
5ffa4e6a | 3653 | reg_equiv[regno].loop_depth = (short) loop_depth; |
2af2dbdc VM |
3654 | |
3655 | /* Don't mess with things live during setjmp. */ | |
91dabbb2 | 3656 | if (optimize && !bitmap_bit_p (setjmp_crosses, regno)) |
2af2dbdc | 3657 | { |
2af2dbdc VM |
3658 | /* If the register is referenced exactly twice, meaning it is |
3659 | set once and used once, indicate that the reference may be | |
3660 | replaced by the equivalence we computed above. Do this | |
3661 | even if the register is only used in one block so that | |
3662 | dependencies can be handled where the last register is | |
3663 | used in a different block (i.e. HIGH / LO_SUM sequences) | |
3664 | and to reduce the number of registers alive across | |
3665 | calls. */ | |
3666 | ||
3667 | if (REG_N_REFS (regno) == 2 | |
63ce14e0 | 3668 | && (rtx_equal_p (replacement, src) |
2af2dbdc VM |
3669 | || ! equiv_init_varies_p (src)) |
3670 | && NONJUMP_INSN_P (insn) | |
3671 | && equiv_init_movable_p (PATTERN (insn), regno)) | |
3672 | reg_equiv[regno].replace = 1; | |
3673 | } | |
3674 | } | |
3675 | } | |
3676 | } | |
42ae0d7f | 3677 | } |
2af2dbdc | 3678 | |
42ae0d7f AM |
3679 | /* For insns that set a MEM to the contents of a REG that is only used |
3680 | in a single basic block, see if the register is always equivalent | |
3681 | to that memory location and if moving the store from INSN to the | |
3682 | insn that sets REG is safe. If so, put a REG_EQUIV note on the | |
3683 | initializing insn. */ | |
3684 | static void | |
3685 | add_store_equivs (void) | |
3686 | { | |
8f9b31f7 | 3687 | auto_bitmap seen_insns; |
2af2dbdc | 3688 | |
42ae0d7f | 3689 | for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn)) |
2af2dbdc VM |
3690 | { |
3691 | rtx set, src, dest; | |
3692 | unsigned regno; | |
42ae0d7f | 3693 | rtx_insn *init_insn; |
2af2dbdc | 3694 | |
8f9b31f7 | 3695 | bitmap_set_bit (seen_insns, INSN_UID (insn)); |
14d7d4be | 3696 | |
2af2dbdc VM |
3697 | if (! INSN_P (insn)) |
3698 | continue; | |
3699 | ||
3700 | set = single_set (insn); | |
3701 | if (! set) | |
3702 | continue; | |
3703 | ||
3704 | dest = SET_DEST (set); | |
3705 | src = SET_SRC (set); | |
3706 | ||
42ae0d7f | 3707 | /* Don't add a REG_EQUIV note if the insn already has one. The existing |
10e04446 | 3708 | REG_EQUIV is likely more useful than the one we are adding. */ |
2af2dbdc VM |
3709 | if (MEM_P (dest) && REG_P (src) |
3710 | && (regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER | |
3711 | && REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS | |
3712 | && DF_REG_DEF_COUNT (regno) == 1 | |
8c1d8b59 | 3713 | && ! reg_equiv[regno].pdx_subregs |
fb0ab697 | 3714 | && reg_equiv[regno].init_insns != NULL |
42ae0d7f | 3715 | && (init_insn = reg_equiv[regno].init_insns->insn ()) != 0 |
8f9b31f7 | 3716 | && bitmap_bit_p (seen_insns, INSN_UID (init_insn)) |
42ae0d7f | 3717 | && ! find_reg_note (init_insn, REG_EQUIV, NULL_RTX) |
63ce14e0 | 3718 | && validate_equiv_mem (init_insn, src, dest) == valid_reload |
42ae0d7f AM |
3719 | && ! memref_used_between_p (dest, init_insn, insn) |
3720 | /* Attaching a REG_EQUIV note will fail if INIT_INSN has | |
3721 | multiple sets. */ | |
3722 | && set_unique_reg_note (init_insn, REG_EQUIV, copy_rtx (dest))) | |
2af2dbdc | 3723 | { |
42ae0d7f AM |
3724 | /* This insn makes the equivalence, not the one initializing |
3725 | the register. */ | |
3726 | ira_reg_equiv[regno].init_insns | |
3727 | = gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX); | |
3728 | df_notes_rescan (init_insn); | |
3729 | if (dump_file) | |
3730 | fprintf (dump_file, | |
3731 | "Adding REG_EQUIV to insn %d for source of insn %d\n", | |
3732 | INSN_UID (init_insn), | |
3733 | INSN_UID (insn)); | |
2af2dbdc VM |
3734 | } |
3735 | } | |
42ae0d7f AM |
3736 | } |
3737 | ||
3738 | /* Scan all regs killed in an insn to see if any of them are registers | |
3739 | only used that once. If so, see if we can replace the reference | |
3740 | with the equivalent form. If we can, delete the initializing | |
3741 | reference and this register will go away. If we can't replace the | |
3742 | reference, and the initializing reference is within the same loop | |
3743 | (or in an inner loop), then move the register initialization just | |
3744 | before the use, so that they are in the same basic block. */ | |
3745 | static void | |
3746 | combine_and_move_insns (void) | |
3747 | { | |
0e3de1d4 | 3748 | auto_bitmap cleared_regs; |
b00544fa | 3749 | int max = max_reg_num (); |
2af2dbdc | 3750 | |
b00544fa | 3751 | for (int regno = FIRST_PSEUDO_REGISTER; regno < max; regno++) |
2af2dbdc | 3752 | { |
b00544fa AM |
3753 | if (!reg_equiv[regno].replace) |
3754 | continue; | |
2af2dbdc | 3755 | |
b00544fa AM |
3756 | rtx_insn *use_insn = 0; |
3757 | for (df_ref use = DF_REG_USE_CHAIN (regno); | |
3758 | use; | |
3759 | use = DF_REF_NEXT_REG (use)) | |
3760 | if (DF_REF_INSN_INFO (use)) | |
3761 | { | |
3762 | if (DEBUG_INSN_P (DF_REF_INSN (use))) | |
3763 | continue; | |
3764 | gcc_assert (!use_insn); | |
3765 | use_insn = DF_REF_INSN (use); | |
3766 | } | |
3767 | gcc_assert (use_insn); | |
2af2dbdc | 3768 | |
b00544fa AM |
3769 | /* Don't substitute into jumps. indirect_jump_optimize does |
3770 | this for anything we are prepared to handle. */ | |
3771 | if (JUMP_P (use_insn)) | |
3772 | continue; | |
3773 | ||
17a938e8 SB |
3774 | /* Also don't substitute into a conditional trap insn -- it can become |
3775 | an unconditional trap, and that is a flow control insn. */ | |
3776 | if (GET_CODE (PATTERN (use_insn)) == TRAP_IF) | |
3777 | continue; | |
3778 | ||
b00544fa AM |
3779 | df_ref def = DF_REG_DEF_CHAIN (regno); |
3780 | gcc_assert (DF_REG_DEF_COUNT (regno) == 1 && DF_REF_INSN_INFO (def)); | |
3781 | rtx_insn *def_insn = DF_REF_INSN (def); | |
3782 | ||
3783 | /* We may not move instructions that can throw, since that | |
3784 | changes basic block boundaries and we are not prepared to | |
3785 | adjust the CFG to match. */ | |
3786 | if (can_throw_internal (def_insn)) | |
3787 | continue; | |
3788 | ||
3789 | basic_block use_bb = BLOCK_FOR_INSN (use_insn); | |
3790 | basic_block def_bb = BLOCK_FOR_INSN (def_insn); | |
3791 | if (bb_loop_depth (use_bb) > bb_loop_depth (def_bb)) | |
3792 | continue; | |
2af2dbdc | 3793 | |
b00544fa AM |
3794 | if (asm_noperands (PATTERN (def_insn)) < 0 |
3795 | && validate_replace_rtx (regno_reg_rtx[regno], | |
3796 | *reg_equiv[regno].src_p, use_insn)) | |
3797 | { | |
3798 | rtx link; | |
3799 | /* Append the REG_DEAD notes from def_insn. */ | |
3800 | for (rtx *p = ®_NOTES (def_insn); (link = *p) != 0; ) | |
2af2dbdc | 3801 | { |
b00544fa | 3802 | if (REG_NOTE_KIND (XEXP (link, 0)) == REG_DEAD) |
2af2dbdc | 3803 | { |
b00544fa AM |
3804 | *p = XEXP (link, 1); |
3805 | XEXP (link, 1) = REG_NOTES (use_insn); | |
3806 | REG_NOTES (use_insn) = link; | |
3807 | } | |
3808 | else | |
3809 | p = &XEXP (link, 1); | |
3810 | } | |
2af2dbdc | 3811 | |
b00544fa AM |
3812 | remove_death (regno, use_insn); |
3813 | SET_REG_N_REFS (regno, 0); | |
3814 | REG_FREQ (regno) = 0; | |
fba12165 BS |
3815 | df_ref use; |
3816 | FOR_EACH_INSN_USE (use, def_insn) | |
3817 | { | |
3818 | unsigned int use_regno = DF_REF_REGNO (use); | |
3819 | if (!HARD_REGISTER_NUM_P (use_regno)) | |
3820 | reg_equiv[use_regno].replace = 0; | |
3821 | } | |
3822 | ||
b00544fa | 3823 | delete_insn (def_insn); |
2af2dbdc | 3824 | |
b00544fa AM |
3825 | reg_equiv[regno].init_insns = NULL; |
3826 | ira_reg_equiv[regno].init_insns = NULL; | |
3827 | bitmap_set_bit (cleared_regs, regno); | |
3828 | } | |
2af2dbdc | 3829 | |
b00544fa AM |
3830 | /* Move the initialization of the register to just before |
3831 | USE_INSN. Update the flow information. */ | |
3832 | else if (prev_nondebug_insn (use_insn) != def_insn) | |
3833 | { | |
3834 | rtx_insn *new_insn; | |
2af2dbdc | 3835 | |
b00544fa AM |
3836 | new_insn = emit_insn_before (PATTERN (def_insn), use_insn); |
3837 | REG_NOTES (new_insn) = REG_NOTES (def_insn); | |
3838 | REG_NOTES (def_insn) = 0; | |
3839 | /* Rescan it to process the notes. */ | |
3840 | df_insn_rescan (new_insn); | |
2af2dbdc | 3841 | |
b00544fa AM |
3842 | /* Make sure this insn is recognized before reload begins, |
3843 | otherwise eliminate_regs_in_insn will die. */ | |
3844 | INSN_CODE (new_insn) = INSN_CODE (def_insn); | |
2af2dbdc | 3845 | |
b00544fa | 3846 | delete_insn (def_insn); |
2af2dbdc | 3847 | |
b00544fa | 3848 | XEXP (reg_equiv[regno].init_insns, 0) = new_insn; |
2af2dbdc | 3849 | |
b00544fa AM |
3850 | REG_BASIC_BLOCK (regno) = use_bb->index; |
3851 | REG_N_CALLS_CROSSED (regno) = 0; | |
2af2dbdc | 3852 | |
b00544fa AM |
3853 | if (use_insn == BB_HEAD (use_bb)) |
3854 | BB_HEAD (use_bb) = new_insn; | |
2af2dbdc | 3855 | |
fcc861d9 AM |
3856 | /* We know regno dies in use_insn, but inside a loop |
3857 | REG_DEAD notes might be missing when def_insn was in | |
3858 | another basic block. However, when we move def_insn into | |
3859 | this bb we'll definitely get a REG_DEAD note and reload | |
3860 | will see the death. It's possible that update_equiv_regs | |
3861 | set up an equivalence referencing regno for a reg set by | |
3862 | use_insn, when regno was seen as non-local. Now that | |
3863 | regno is local to this block, and dies, such an | |
3864 | equivalence is invalid. */ | |
8972f7e9 | 3865 | if (find_reg_note (use_insn, REG_EQUIV, regno_reg_rtx[regno])) |
fcc861d9 AM |
3866 | { |
3867 | rtx set = single_set (use_insn); | |
3868 | if (set && REG_P (SET_DEST (set))) | |
3869 | no_equiv (SET_DEST (set), set, NULL); | |
3870 | } | |
3871 | ||
b00544fa AM |
3872 | ira_reg_equiv[regno].init_insns |
3873 | = gen_rtx_INSN_LIST (VOIDmode, new_insn, NULL_RTX); | |
3874 | bitmap_set_bit (cleared_regs, regno); | |
2af2dbdc VM |
3875 | } |
3876 | } | |
3877 | ||
3878 | if (!bitmap_empty_p (cleared_regs)) | |
3a6191b1 | 3879 | { |
b00544fa AM |
3880 | basic_block bb; |
3881 | ||
11cd3bed | 3882 | FOR_EACH_BB_FN (bb, cfun) |
3a6191b1 | 3883 | { |
3a6191b1 JJ |
3884 | bitmap_and_compl_into (DF_LR_IN (bb), cleared_regs); |
3885 | bitmap_and_compl_into (DF_LR_OUT (bb), cleared_regs); | |
b00544fa | 3886 | if (!df_live) |
bf744527 SB |
3887 | continue; |
3888 | bitmap_and_compl_into (DF_LIVE_IN (bb), cleared_regs); | |
3889 | bitmap_and_compl_into (DF_LIVE_OUT (bb), cleared_regs); | |
3a6191b1 JJ |
3890 | } |
3891 | ||
3892 | /* Last pass - adjust debug insns referencing cleared regs. */ | |
36f52e8f | 3893 | if (MAY_HAVE_DEBUG_BIND_INSNS) |
b00544fa | 3894 | for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn)) |
36f52e8f | 3895 | if (DEBUG_BIND_INSN_P (insn)) |
3a6191b1 JJ |
3896 | { |
3897 | rtx old_loc = INSN_VAR_LOCATION_LOC (insn); | |
3898 | INSN_VAR_LOCATION_LOC (insn) | |
3899 | = simplify_replace_fn_rtx (old_loc, NULL_RTX, | |
3900 | adjust_cleared_regs, | |
3901 | (void *) cleared_regs); | |
3902 | if (old_loc != INSN_VAR_LOCATION_LOC (insn)) | |
3903 | df_insn_rescan (insn); | |
3904 | } | |
3905 | } | |
2af2dbdc VM |
3906 | } |
3907 | ||
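/* Editorial illustration (a standalone sketch over an invented note
   type, not the rtx representation): the REG_DEAD transfer above uses
   the pointer-to-pointer idiom -- walking the list through the address
   of each link so matching nodes can be unlinked and pushed onto
   another list without special-casing the head.  */

#include <assert.h>
#include <stddef.h>

struct note { int kind; struct note *next; };

static void
splice_notes (struct note **src, struct note **dst, int kind)
{
  struct note *link;
  struct note **p;

  for (p = src; (link = *p) != NULL; )
    if (link->kind == kind)
      {
        *p = link->next;       /* Unlink from the source list.  */
        link->next = *dst;     /* Push onto the destination list.  */
        *dst = link;
      }
    else
      p = &link->next;
}

int
main (void)
{
  struct note c = { 2, NULL }, b = { 1, &c }, a = { 2, &b };
  struct note *src = &a, *dst = NULL;

  splice_notes (&src, &dst, 2);
  assert (src == &b && src->next == NULL);
  assert (dst == &c && dst->next == &a && a.next == NULL);
  return 0;
}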
6585b2e2 AM |
3908 | /* A pass over indirect jumps, converting simple cases to direct jumps. |
3909 | Combine does this optimization too, but only within a basic block. */ | |
ba52669f AM |
3910 | static void |
3911 | indirect_jump_optimize (void) | |
3912 | { | |
3913 | basic_block bb; | |
3914 | bool rebuild_p = false; | |
3915 | ||
3916 | FOR_EACH_BB_REVERSE_FN (bb, cfun) | |
3917 | { | |
3918 | rtx_insn *insn = BB_END (bb); | |
97eb24c4 JJ |
3919 | if (!JUMP_P (insn) |
3920 | || find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX)) | |
ba52669f AM |
3921 | continue; |
3922 | ||
3923 | rtx x = pc_set (insn); | |
3924 | if (!x || !REG_P (SET_SRC (x))) | |
3925 | continue; | |
3926 | ||
3927 | int regno = REGNO (SET_SRC (x)); | |
3928 | if (DF_REG_DEF_COUNT (regno) == 1) | |
3929 | { | |
6585b2e2 AM |
3930 | df_ref def = DF_REG_DEF_CHAIN (regno); |
3931 | if (!DF_REF_IS_ARTIFICIAL (def)) | |
ba52669f | 3932 | { |
6585b2e2 | 3933 | rtx_insn *def_insn = DF_REF_INSN (def); |
97eb24c4 JJ |
3934 | rtx lab = NULL_RTX; |
3935 | rtx set = single_set (def_insn); | |
3936 | if (set && GET_CODE (SET_SRC (set)) == LABEL_REF) | |
3937 | lab = SET_SRC (set); | |
3938 | else | |
6585b2e2 | 3939 | { |
97eb24c4 JJ |
3940 | rtx eqnote = find_reg_note (def_insn, REG_EQUAL, NULL_RTX); |
3941 | if (eqnote && GET_CODE (XEXP (eqnote, 0)) == LABEL_REF) | |
3942 | lab = XEXP (eqnote, 0); | |
6585b2e2 | 3943 | } |
97eb24c4 JJ |
3944 | if (lab && validate_replace_rtx (SET_SRC (x), lab, insn)) |
3945 | rebuild_p = true; | |
ba52669f AM |
3946 | } |
3947 | } | |
3948 | } | |
2af2dbdc | 3949 | |
ba52669f AM |
3950 | if (rebuild_p) |
3951 | { | |
3952 | timevar_push (TV_JUMP); | |
3953 | rebuild_jump_labels (get_insns ()); | |
3954 | if (purge_all_dead_edges ()) | |
3955 | delete_unreachable_blocks (); | |
3956 | timevar_pop (TV_JUMP); | |
3957 | } | |
3958 | } | |
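/* Editorial illustration (an analogy in GNU C, not the RTL pass
   itself): an indirect jump through a pseudo whose single definition
   is a known label becomes a direct jump to that label.  The computed
   goto below plays the role of (set (pc) (reg)).  */

#include <stdio.h>

int
main (void)
{
  void *target = &&done;       /* Single definition from a known label,
                                  like the LABEL_REF def found through
                                  DF_REG_DEF_CHAIN above.  */

  goto *target;                /* Indirect jump; after the optimization
                                  this is simply "goto done;".  */
done:
  puts ("reached");
  return 0;
}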
3959 | \f | |
55a2c322 VM |
3960 | /* Set up fields memory, constant, and invariant from init_insns in |
3961 | the structures of array ira_reg_equiv. */ | |
3962 | static void | |
3963 | setup_reg_equiv (void) | |
3964 | { | |
3965 | int i; | |
0cc97fc5 DM |
3966 | rtx_insn_list *elem, *prev_elem, *next_elem; |
3967 | rtx_insn *insn; | |
3968 | rtx set, x; | |
55a2c322 VM |
3969 | |
3970 | for (i = FIRST_PSEUDO_REGISTER; i < ira_reg_equiv_len; i++) | |
5a107a0f VM |
3971 | for (prev_elem = NULL, elem = ira_reg_equiv[i].init_insns; |
3972 | elem; | |
3973 | prev_elem = elem, elem = next_elem) | |
55a2c322 | 3974 | { |
0cc97fc5 DM |
3975 | next_elem = elem->next (); |
3976 | insn = elem->insn (); | |
55a2c322 VM |
3977 | set = single_set (insn); |
3978 | ||
3979 | /* Init insns can set up an equivalence when the reg is a destination | |
3980 | or a source (in the latter case the destination is memory). */ | |
3981 | if (set != 0 && (REG_P (SET_DEST (set)) || REG_P (SET_SRC (set)))) | |
3982 | { | |
3983 | if ((x = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL) | |
5a107a0f VM |
3984 | { |
3985 | x = XEXP (x, 0); | |
3986 | if (REG_P (SET_DEST (set)) | |
3987 | && REGNO (SET_DEST (set)) == (unsigned int) i | |
3988 | && ! rtx_equal_p (SET_SRC (set), x) && MEM_P (x)) | |
3989 | { | |
3990 | /* This insn reports the equivalence but does | |
3991 | not actually set it.  Remove it from the | |
3992 | list. */ | |
3993 | if (prev_elem == NULL) | |
3994 | ira_reg_equiv[i].init_insns = next_elem; | |
3995 | else | |
3996 | XEXP (prev_elem, 1) = next_elem; | |
3997 | elem = prev_elem; | |
3998 | } | |
3999 | } | |
55a2c322 VM |
4000 | else if (REG_P (SET_DEST (set)) |
4001 | && REGNO (SET_DEST (set)) == (unsigned int) i) | |
4002 | x = SET_SRC (set); | |
4003 | else | |
4004 | { | |
4005 | gcc_assert (REG_P (SET_SRC (set)) | |
4006 | && REGNO (SET_SRC (set)) == (unsigned int) i); | |
4007 | x = SET_DEST (set); | |
4008 | } | |
4009 | if (! function_invariant_p (x) | |
4010 | || ! flag_pic | |
4011 | /* A function invariant is often CONSTANT_P but may | |
4012 | include a register. We promise to only pass | |
4013 | CONSTANT_P objects to LEGITIMATE_PIC_OPERAND_P. */ | |
4014 | || (CONSTANT_P (x) && LEGITIMATE_PIC_OPERAND_P (x))) | |
4015 | { | |
4016 | /* It can happen that a REG_EQUIV note contains a MEM | |
4017 | that is not a legitimate memory operand. As later | |
4018 | stages of reload assume that all addresses found in | |
4019 | the lra_regno_equiv_* arrays were originally | |
4020 | legitimate, we ignore such REG_EQUIV notes. */ | |
4021 | if (memory_operand (x, VOIDmode)) | |
4022 | { | |
4023 | ira_reg_equiv[i].defined_p = true; | |
4024 | ira_reg_equiv[i].memory = x; | |
4025 | continue; | |
4026 | } | |
4027 | else if (function_invariant_p (x)) | |
4028 | { | |
ef4bddc2 | 4029 | machine_mode mode; |
55a2c322 VM |
4030 | |
4031 | mode = GET_MODE (SET_DEST (set)); | |
4032 | if (GET_CODE (x) == PLUS | |
4033 | || x == frame_pointer_rtx || x == arg_pointer_rtx) | |
4034 | /* This is PLUS of frame pointer and a constant, | |
4035 | or fp, or argp. */ | |
4036 | ira_reg_equiv[i].invariant = x; | |
4037 | else if (targetm.legitimate_constant_p (mode, x)) | |
4038 | ira_reg_equiv[i].constant = x; | |
4039 | else | |
4040 | { | |
4041 | ira_reg_equiv[i].memory = force_const_mem (mode, x); | |
4042 | if (ira_reg_equiv[i].memory == NULL_RTX) | |
4043 | { | |
4044 | ira_reg_equiv[i].defined_p = false; | |
0cc97fc5 | 4045 | ira_reg_equiv[i].init_insns = NULL; |
55a2c322 VM |
4046 | break; |
4047 | } | |
4048 | } | |
4049 | ira_reg_equiv[i].defined_p = true; | |
4050 | continue; | |
4051 | } | |
4052 | } | |
4053 | } | |
4054 | ira_reg_equiv[i].defined_p = false; | |
0cc97fc5 | 4055 | ira_reg_equiv[i].init_insns = NULL; |
55a2c322 VM |
4056 | break; |
4057 | } | |
4058 | } | |
4059 | ||
4060 | \f | |
4061 | ||
2af2dbdc VM |
4062 | /* Print chain C to FILE. */ |
4063 | static void | |
4064 | print_insn_chain (FILE *file, struct insn_chain *c) | |
4065 | { | |
c3284718 | 4066 | fprintf (file, "insn=%d, ", INSN_UID (c->insn)); |
2af2dbdc VM |
4067 | bitmap_print (file, &c->live_throughout, "live_throughout: ", ", "); |
4068 | bitmap_print (file, &c->dead_or_set, "dead_or_set: ", "\n"); | |
4069 | } | |
4070 | ||
4071 | ||
4072 | /* Print all reload_insn_chains to FILE. */ | |
4073 | static void | |
4074 | print_insn_chains (FILE *file) | |
4075 | { | |
4076 | struct insn_chain *c; | |
4077 | for (c = reload_insn_chain; c ; c = c->next) | |
4078 | print_insn_chain (file, c); | |
4079 | } | |
4080 | ||
4081 | /* Return true if pseudo REGNO should be added to set live_throughout | |
4082 | or dead_or_set of the insn chains for reload consideration. */ | |
4083 | static bool | |
4084 | pseudo_for_reload_consideration_p (int regno) | |
4085 | { | |
4086 | /* Consider spilled pseudos too for IRA because they still have a | |
4087 | chance to get hard-registers in the reload when IRA is used. */ | |
b100151b | 4088 | return (reg_renumber[regno] >= 0 || ira_conflicts_p); |
2af2dbdc VM |
4089 | } |
4090 | ||
9dcf1f86 RS |
4091 | /* Return true if we can track the individual bytes of subreg X. |
4092 | When returning true, set *OUTER_SIZE to the number of bytes in | |
4093 | X itself, *INNER_SIZE to the number of bytes in the inner register | |
4094 | and *START to the offset of the first byte. */ | |
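/* For example (illustrative only): for (subreg:SI (reg:DI 100) 4),
   *OUTER_SIZE is 4, *INNER_SIZE is 8 and *START is 4, so byte tracking
   covers bytes 4..7 of the inner register.  */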
4095 | static bool | |
4096 | get_subreg_tracking_sizes (rtx x, HOST_WIDE_INT *outer_size, | |
4097 | HOST_WIDE_INT *inner_size, HOST_WIDE_INT *start) | |
4098 | { | |
4099 | rtx reg = regno_reg_rtx[REGNO (SUBREG_REG (x))]; | |
cf098191 RS |
4100 | return (GET_MODE_SIZE (GET_MODE (x)).is_constant (outer_size) |
4101 | && GET_MODE_SIZE (GET_MODE (reg)).is_constant (inner_size) | |
4102 | && SUBREG_BYTE (x).is_constant (start)); | |
9dcf1f86 RS |
4103 | } |
4104 | ||
4105 | /* Init LIVE_SUBREGS[ALLOCNUM] and LIVE_SUBREGS_USED[ALLOCNUM] for | |
4106 | a register with SIZE bytes, making the register live if INIT_VALUE. */ | |
2af2dbdc VM |
4107 | static void |
4108 | init_live_subregs (bool init_value, sbitmap *live_subregs, | |
9dcf1f86 | 4109 | bitmap live_subregs_used, int allocnum, int size) |
2af2dbdc | 4110 | { |
2af2dbdc VM |
4111 | gcc_assert (size > 0); |
4112 | ||
4113 | /* Been there, done that. */ | |
cee784f5 | 4114 | if (bitmap_bit_p (live_subregs_used, allocnum)) |
2af2dbdc VM |
4115 | return; |
4116 | ||
cee784f5 | 4117 | /* Create a new one. */ |
2af2dbdc VM |
4118 | if (live_subregs[allocnum] == NULL) |
4119 | live_subregs[allocnum] = sbitmap_alloc (size); | |
4120 | ||
4121 | /* If the entire reg was live before being split into subregs, we | |
4122 | need to init all of the subregs to ones; else init them to 0. */ | |
4123 | if (init_value) | |
f61e445a | 4124 | bitmap_ones (live_subregs[allocnum]); |
b8698a0f | 4125 | else |
f61e445a | 4126 | bitmap_clear (live_subregs[allocnum]); |
2af2dbdc | 4127 | |
cee784f5 | 4128 | bitmap_set_bit (live_subregs_used, allocnum); |
2af2dbdc VM |
4129 | } |
4130 | ||
4131 | /* Walk the insns of the current function and build reload_insn_chain, | |
4132 | and record register life information. */ | |
4133 | static void | |
4134 | build_insn_chain (void) | |
4135 | { | |
4136 | unsigned int i; | |
4137 | struct insn_chain **p = &reload_insn_chain; | |
4138 | basic_block bb; | |
4139 | struct insn_chain *c = NULL; | |
4140 | struct insn_chain *next = NULL; | |
0e3de1d4 TS |
4141 | auto_bitmap live_relevant_regs; |
4142 | auto_bitmap elim_regset; | |
2af2dbdc VM |
4143 | /* live_subregs is a vector used to keep accurate information about |
4144 | which hardregs are live in multiword pseudos. live_subregs and | |
4145 | live_subregs_used are indexed by pseudo number. The live_subreg | |
4146 | entry for a particular pseudo is only used if the corresponding | |
cee784f5 SB |
4147 | element is non zero in live_subregs_used. The sbitmap size of |
4148 | live_subreg[allocno] is number of bytes that the pseudo can | |
2af2dbdc VM |
4149 | occupy. */ |
4150 | sbitmap *live_subregs = XCNEWVEC (sbitmap, max_regno); | |
0e3de1d4 | 4151 | auto_bitmap live_subregs_used; |
2af2dbdc VM |
4152 | |
4153 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | |
4154 | if (TEST_HARD_REG_BIT (eliminable_regset, i)) | |
4155 | bitmap_set_bit (elim_regset, i); | |
4f42035e | 4156 | FOR_EACH_BB_REVERSE_FN (bb, cfun) |
2af2dbdc VM |
4157 | { |
4158 | bitmap_iterator bi; | |
070a1983 | 4159 | rtx_insn *insn; |
b8698a0f | 4160 | |
2af2dbdc | 4161 | CLEAR_REG_SET (live_relevant_regs); |
cee784f5 | 4162 | bitmap_clear (live_subregs_used); |
b8698a0f | 4163 | |
bf744527 | 4164 | EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb), 0, i, bi) |
2af2dbdc VM |
4165 | { |
4166 | if (i >= FIRST_PSEUDO_REGISTER) | |
4167 | break; | |
4168 | bitmap_set_bit (live_relevant_regs, i); | |
4169 | } | |
4170 | ||
bf744527 | 4171 | EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb), |
2af2dbdc VM |
4172 | FIRST_PSEUDO_REGISTER, i, bi) |
4173 | { | |
4174 | if (pseudo_for_reload_consideration_p (i)) | |
4175 | bitmap_set_bit (live_relevant_regs, i); | |
4176 | } | |
4177 | ||
4178 | FOR_BB_INSNS_REVERSE (bb, insn) | |
4179 | { | |
4180 | if (!NOTE_P (insn) && !BARRIER_P (insn)) | |
4181 | { | |
bfac633a RS |
4182 | struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn); |
4183 | df_ref def, use; | |
2af2dbdc VM |
4184 | |
4185 | c = new_insn_chain (); | |
4186 | c->next = next; | |
4187 | next = c; | |
4188 | *p = c; | |
4189 | p = &c->prev; | |
b8698a0f | 4190 | |
2af2dbdc VM |
4191 | c->insn = insn; |
4192 | c->block = bb->index; | |
4193 | ||
4b71920a | 4194 | if (NONDEBUG_INSN_P (insn)) |
bfac633a | 4195 | FOR_EACH_INSN_INFO_DEF (def, insn_info) |
2af2dbdc | 4196 | { |
2af2dbdc | 4197 | unsigned int regno = DF_REF_REGNO (def); |
b8698a0f | 4198 | |
2af2dbdc VM |
4199 | /* Ignore may clobbers because these are generated |
4200 | from calls. However, every other kind of def is | |
4201 | added to dead_or_set. */ | |
4202 | if (!DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER)) | |
4203 | { | |
4204 | if (regno < FIRST_PSEUDO_REGISTER) | |
4205 | { | |
4206 | if (!fixed_regs[regno]) | |
4207 | bitmap_set_bit (&c->dead_or_set, regno); | |
4208 | } | |
4209 | else if (pseudo_for_reload_consideration_p (regno)) | |
4210 | bitmap_set_bit (&c->dead_or_set, regno); | |
4211 | } | |
4212 | ||
4213 | if ((regno < FIRST_PSEUDO_REGISTER | |
4214 | || reg_renumber[regno] >= 0 | |
4215 | || ira_conflicts_p) | |
4216 | && (!DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL))) | |
4217 | { | |
4218 | rtx reg = DF_REF_REG (def); | |
9dcf1f86 RS |
4219 | HOST_WIDE_INT outer_size, inner_size, start; |
4220 | ||
4221 | /* We can usually track the liveness of individual | |
4222 | bytes within a subreg. The only exceptions are | |
4223 | subregs wrapped in ZERO_EXTRACTs and subregs whose | |
4224 | size is not known; in those cases we need to be | |
4225 | conservative and treat the definition as a partial | |
4226 | definition of the full register rather than a full | |
4227 | definition of a specific part of the register. */ | |
2af2dbdc | 4228 | if (GET_CODE (reg) == SUBREG |
9dcf1f86 RS |
4229 | && !DF_REF_FLAGS_IS_SET (def, DF_REF_ZERO_EXTRACT) |
4230 | && get_subreg_tracking_sizes (reg, &outer_size, | |
4231 | &inner_size, &start)) | |
2af2dbdc | 4232 | { |
9dcf1f86 | 4233 | HOST_WIDE_INT last = start + outer_size; |
2af2dbdc VM |
4234 | |
4235 | init_live_subregs | |
b8698a0f | 4236 | (bitmap_bit_p (live_relevant_regs, regno), |
9dcf1f86 RS |
4237 | live_subregs, live_subregs_used, regno, |
4238 | inner_size); | |
2af2dbdc VM |
4239 | |
4240 | if (!DF_REF_FLAGS_IS_SET | |
4241 | (def, DF_REF_STRICT_LOW_PART)) | |
4242 | { | |
4243 | /* Expand the range to cover entire words. | |
4244 | Bytes added here are "don't care". */ | |
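/* E.g. with 4-byte words (an illustrative UNITS_PER_WORD), a def of
   bytes 5..6 is widened to cover bytes 4..7.  */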
4245 | start | |
4246 | = start / UNITS_PER_WORD * UNITS_PER_WORD; | |
4247 | last = ((last + UNITS_PER_WORD - 1) | |
4248 | / UNITS_PER_WORD * UNITS_PER_WORD); | |
4249 | } | |
4250 | ||
4251 | /* Ignore the paradoxical bits. */ | |
cee784f5 SB |
4252 | if (last > SBITMAP_SIZE (live_subregs[regno])) |
4253 | last = SBITMAP_SIZE (live_subregs[regno]); | |
2af2dbdc VM |
4254 | |
4255 | while (start < last) | |
4256 | { | |
d7c028c0 | 4257 | bitmap_clear_bit (live_subregs[regno], start); |
2af2dbdc VM |
4258 | start++; |
4259 | } | |
b8698a0f | 4260 | |
f61e445a | 4261 | if (bitmap_empty_p (live_subregs[regno])) |
2af2dbdc | 4262 | { |
cee784f5 | 4263 | bitmap_clear_bit (live_subregs_used, regno); |
2af2dbdc VM |
4264 | bitmap_clear_bit (live_relevant_regs, regno); |
4265 | } | |
4266 | else | |
4267 | /* Set live_relevant_regs here because | |
4268 | that bit has to be true to get us to | |
4269 | look at the live_subregs fields. */ | |
4270 | bitmap_set_bit (live_relevant_regs, regno); | |
4271 | } | |
4272 | else | |
4273 | { | |
4274 | /* DF_REF_PARTIAL is generated for | |
4275 | subregs, STRICT_LOW_PART, and | |
4276 | ZERO_EXTRACT. We handle the subreg | |
4277 | case above so here we have to keep from | |
4278 | modeling the def as a killing def. */ | |
4279 | if (!DF_REF_FLAGS_IS_SET (def, DF_REF_PARTIAL)) | |
4280 | { | |
cee784f5 | 4281 | bitmap_clear_bit (live_subregs_used, regno); |
2af2dbdc | 4282 | bitmap_clear_bit (live_relevant_regs, regno); |
2af2dbdc VM |
4283 | } |
4284 | } | |
4285 | } | |
4286 | } | |
b8698a0f | 4287 | |
2af2dbdc VM |
4288 | bitmap_and_compl_into (live_relevant_regs, elim_regset); |
4289 | bitmap_copy (&c->live_throughout, live_relevant_regs); | |
4290 | ||
4b71920a | 4291 | if (NONDEBUG_INSN_P (insn)) |
bfac633a | 4292 | FOR_EACH_INSN_INFO_USE (use, insn_info) |
2af2dbdc | 4293 | { |
2af2dbdc VM |
4294 | unsigned int regno = DF_REF_REGNO (use); |
4295 | rtx reg = DF_REF_REG (use); | |
b8698a0f | 4296 | |
2af2dbdc VM |
4297 | /* DF_REF_READ_WRITE on a use means that this use |
4298 | is fabricated from a def that is a partial set | |
4299 | to a multiword reg. Here, we only model the | |
4300 | subreg case that is not wrapped in ZERO_EXTRACT | |
4301 | precisely so we do not need to look at the | |
2b9c63a2 | 4302 | fabricated use. */ |
b8698a0f L |
4303 | if (DF_REF_FLAGS_IS_SET (use, DF_REF_READ_WRITE) |
4304 | && !DF_REF_FLAGS_IS_SET (use, DF_REF_ZERO_EXTRACT) | |
2af2dbdc VM |
4305 | && DF_REF_FLAGS_IS_SET (use, DF_REF_SUBREG)) |
4306 | continue; | |
b8698a0f | 4307 | |
2af2dbdc VM |
4308 | /* Add the last use of each var to dead_or_set. */ |
4309 | if (!bitmap_bit_p (live_relevant_regs, regno)) | |
4310 | { | |
4311 | if (regno < FIRST_PSEUDO_REGISTER) | |
4312 | { | |
4313 | if (!fixed_regs[regno]) | |
4314 | bitmap_set_bit (&c->dead_or_set, regno); | |
4315 | } | |
4316 | else if (pseudo_for_reload_consideration_p (regno)) | |
4317 | bitmap_set_bit (&c->dead_or_set, regno); | |
4318 | } | |
b8698a0f | 4319 | |
2af2dbdc VM |
4320 | if (regno < FIRST_PSEUDO_REGISTER |
4321 | || pseudo_for_reload_consideration_p (regno)) | |
4322 | { | |
9dcf1f86 | 4323 | HOST_WIDE_INT outer_size, inner_size, start; |
2af2dbdc VM |
4324 | if (GET_CODE (reg) == SUBREG |
4325 | && !DF_REF_FLAGS_IS_SET (use, | |
4326 | DF_REF_SIGN_EXTRACT | |
9dcf1f86 RS |
4327 | | DF_REF_ZERO_EXTRACT) |
4328 | && get_subreg_tracking_sizes (reg, &outer_size, | |
4329 | &inner_size, &start)) | |
2af2dbdc | 4330 | { |
9dcf1f86 | 4331 | HOST_WIDE_INT last = start + outer_size; |
b8698a0f | 4332 | |
2af2dbdc | 4333 | init_live_subregs |
b8698a0f | 4334 | (bitmap_bit_p (live_relevant_regs, regno), |
9dcf1f86 RS |
4335 | live_subregs, live_subregs_used, regno, |
4336 | inner_size); | |
b8698a0f | 4337 | |
2af2dbdc | 4338 | /* Ignore the paradoxical bits. */ |
cee784f5 SB |
4339 | if (last > SBITMAP_SIZE (live_subregs[regno])) |
4340 | last = SBITMAP_SIZE (live_subregs[regno]); | |
2af2dbdc VM |
4341 | |
4342 | while (start < last) | |
4343 | { | |
d7c028c0 | 4344 | bitmap_set_bit (live_subregs[regno], start); |
2af2dbdc VM |
4345 | start++; |
4346 | } | |
4347 | } | |
4348 | else | |
4349 | /* Resetting the live_subregs_used is | |
4350 | effectively saying do not use the subregs | |
4351 | because we are reading the whole | |
4352 | pseudo. */ | |
cee784f5 | 4353 | bitmap_clear_bit (live_subregs_used, regno); |
2af2dbdc VM |
4354 | bitmap_set_bit (live_relevant_regs, regno); |
4355 | } | |
4356 | } | |
4357 | } | |
4358 | } | |
4359 | ||
4360 | /* FIXME!! The following code is a disaster. Reload needs to see the | |
4361 | labels and jump tables that are just hanging out in between | |
4362 | the basic blocks. See pr33676. */ | |
4363 | insn = BB_HEAD (bb); | |
b8698a0f | 4364 | |
2af2dbdc | 4365 | /* Skip over the barriers and cruft. */ |
b8698a0f | 4366 | while (insn && (BARRIER_P (insn) || NOTE_P (insn) |
2af2dbdc VM |
4367 | || BLOCK_FOR_INSN (insn) == bb)) |
4368 | insn = PREV_INSN (insn); | |
b8698a0f | 4369 | |
2af2dbdc VM |
4370 | /* While we add anything except barriers and notes, the focus is |
4371 | to get the labels and jump tables into the | |
4372 | reload_insn_chain. */ | |
4373 | while (insn) | |
4374 | { | |
4375 | if (!NOTE_P (insn) && !BARRIER_P (insn)) | |
4376 | { | |
4377 | if (BLOCK_FOR_INSN (insn)) | |
4378 | break; | |
b8698a0f | 4379 | |
2af2dbdc VM |
4380 | c = new_insn_chain (); |
4381 | c->next = next; | |
4382 | next = c; | |
4383 | *p = c; | |
4384 | p = &c->prev; | |
b8698a0f | 4385 | |
2af2dbdc VM |
4386 | /* The block makes no sense here, but it is what the old |
4387 | code did. */ | |
4388 | c->block = bb->index; | |
4389 | c->insn = insn; | |
4390 | bitmap_copy (&c->live_throughout, live_relevant_regs); | |
b8698a0f | 4391 | } |
2af2dbdc VM |
4392 | insn = PREV_INSN (insn); |
4393 | } | |
4394 | } | |
4395 | ||
2af2dbdc VM |
4396 | reload_insn_chain = c; |
4397 | *p = NULL; | |
4398 | ||
cee784f5 SB |
4399 | for (i = 0; i < (unsigned int) max_regno; i++) |
4400 | if (live_subregs[i] != NULL) | |
4401 | sbitmap_free (live_subregs[i]); | |
2af2dbdc | 4402 | free (live_subregs); |
2af2dbdc VM |
4403 | |
4404 | if (dump_file) | |
4405 | print_insn_chains (dump_file); | |
4406 | } | |
acf41a74 BS |
4407 | \f |
4408 | /* Examine the rtx found in *LOC, which is read or written to as determined | |
4409 | by TYPE. Return false if we find a reason why an insn containing this | |
4410 | rtx should not be moved (such as accesses to non-constant memory), true | |
4411 | otherwise. */ | |
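/* For example, a load from read-only memory such as
   (set (reg 100) (mem/u (symbol_ref "table"))) is moveable
   (illustrative RTL), while volatile asm, UNSPEC_VOLATILE, and uses of
   hard registers other than the frame pointer are not.  */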
4412 | static bool | |
4413 | rtx_moveable_p (rtx *loc, enum op_type type) | |
4414 | { | |
4415 | const char *fmt; | |
4416 | rtx x = *loc; | |
acf41a74 BS |
4417 | int i, j; |
4418 | ||
45309d28 | 4419 | enum rtx_code code = GET_CODE (x); |
acf41a74 BS |
4420 | switch (code) |
4421 | { | |
4422 | case CONST: | |
d8116890 | 4423 | CASE_CONST_ANY: |
acf41a74 BS |
4424 | case SYMBOL_REF: |
4425 | case LABEL_REF: | |
4426 | return true; | |
4427 | ||
4428 | case PC: | |
4429 | return type == OP_IN; | |
4430 | ||
4431 | case CC0: | |
4432 | return false; | |
4433 | ||
4434 | case REG: | |
4435 | if (x == frame_pointer_rtx) | |
4436 | return true; | |
4437 | if (HARD_REGISTER_P (x)) | |
4438 | return false; | |
4439 | ||
4440 | return true; | |
4441 | ||
4442 | case MEM: | |
4443 | if (type == OP_IN && MEM_READONLY_P (x)) | |
4444 | return rtx_moveable_p (&XEXP (x, 0), OP_IN); | |
4445 | return false; | |
4446 | ||
4447 | case SET: | |
4448 | return (rtx_moveable_p (&SET_SRC (x), OP_IN) | |
4449 | && rtx_moveable_p (&SET_DEST (x), OP_OUT)); | |
4450 | ||
4451 | case STRICT_LOW_PART: | |
4452 | return rtx_moveable_p (&XEXP (x, 0), OP_OUT); | |
4453 | ||
4454 | case ZERO_EXTRACT: | |
4455 | case SIGN_EXTRACT: | |
4456 | return (rtx_moveable_p (&XEXP (x, 0), type) | |
4457 | && rtx_moveable_p (&XEXP (x, 1), OP_IN) | |
4458 | && rtx_moveable_p (&XEXP (x, 2), OP_IN)); | |
4459 | ||
4460 | case CLOBBER: | |
8df47bdf | 4461 | case CLOBBER_HIGH: |
acf41a74 BS |
4462 | return rtx_moveable_p (&SET_DEST (x), OP_OUT); |
4463 | ||
d8c16744 | 4464 | case UNSPEC_VOLATILE: |
026c3cfd | 4465 | /* It is a bad idea to consider insns with such rtl |
d8c16744 VM |
4466 | as moveable ones.  The insn scheduler also treats them as a barrier | |
4467 | for a reason. */ | |
4468 | return false; | |
4469 | ||
9d0d0a5a SB |
4470 | case ASM_OPERANDS: |
4471 | /* The same is true for volatile asm: it has unknown side effects, it | |
4472 | cannot be moved at will. */ | |
4473 | if (MEM_VOLATILE_P (x)) | |
4474 | return false; | |
4475 | ||
acf41a74 BS |
4476 | default: |
4477 | break; | |
4478 | } | |
4479 | ||
4480 | fmt = GET_RTX_FORMAT (code); | |
4481 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
4482 | { | |
4483 | if (fmt[i] == 'e') | |
4484 | { | |
4485 | if (!rtx_moveable_p (&XEXP (x, i), type)) | |
4486 | return false; | |
4487 | } | |
4488 | else if (fmt[i] == 'E') | |
4489 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
4490 | { | |
4491 | if (!rtx_moveable_p (&XVECEXP (x, i, j), type)) | |
4492 | return false; | |
4493 | } | |
4494 | } | |
4495 | return true; | |
4496 | } | |
4497 | ||
4498 | /* A wrapper around dominated_by_p, which uses the information in UID_LUID | |
4499 | to give dominance relationships between two insns I1 and I2. */ | |
4500 | static bool | |
4501 | insn_dominated_by_p (rtx i1, rtx i2, int *uid_luid) | |
4502 | { | |
4503 | basic_block bb1 = BLOCK_FOR_INSN (i1); | |
4504 | basic_block bb2 = BLOCK_FOR_INSN (i2); | |
4505 | ||
4506 | if (bb1 == bb2) | |
4507 | return uid_luid[INSN_UID (i2)] < uid_luid[INSN_UID (i1)]; | |
4508 | return dominated_by_p (CDI_DOMINATORS, bb1, bb2); | |
4509 | } | |
4510 | ||
4511 | /* Record the range of register numbers added by find_moveable_pseudos. */ | |
4512 | int first_moveable_pseudo, last_moveable_pseudo; | |
4513 | ||
4514 | /* This vector holds data for every register added by | |
4515 | find_moveable_pseudos, with index 0 holding data for the | |
4516 | first_moveable_pseudo. */ | |
4517 | /* The original home register. */ | |
9771b263 | 4518 | static vec<rtx> pseudo_replaced_reg; |
acf41a74 BS |
4519 | |
4520 | /* Look for instances where we have an instruction that is known to increase | |
4521 | register pressure, and whose result is not used immediately. If it is | |
4522 | possible to move the instruction downwards to just before its first use, | |
4523 | split its lifetime into two ranges. We create a new pseudo to compute the | |
4524 | value, and emit a move instruction just before the first use. If, after | |
4525 | register allocation, the new pseudo remains unallocated, the function | |
4526 | move_unallocated_pseudos then deletes the move instruction and places | |
4527 | the computation just before the first use. | |
4528 | ||
4529 | Such a move is safe and profitable if all the input registers remain live | |
4530 | and unchanged between the original computation and its first use. In such | |
4531 | a situation, the computation is known to increase register pressure, and | |
4532 | moving it is known to at least not worsen it. | |
4533 | ||
4534 | We restrict moves to only those cases where a register remains unallocated, | |
4535 | in order to avoid interfering too much with the instruction schedule. As | |
4536 | an exception, we may move insns which only modify their input register | |
4537 | (typically induction variables), as this increases the freedom for our | |
4538 | intended transformation, and does not limit the second instruction | |
4539 | scheduler pass. */ | |
4540 | ||
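/* An illustrative sketch with hypothetical insn and pseudo numbers:
   given

       insn 10:  (set (reg 100) (plus (reg 90) (reg 91)))
       ...                        ;; regs 90 and 91 live and unchanged
       insn 50:  first use of (reg 100)

   we rewrite insn 10 to set a fresh pseudo (reg 200) and emit
   (set (reg 100) (reg 200)) just before insn 50.  If reg 200 is still
   unallocated after coloring, move_unallocated_pseudos deletes the
   move and re-emits the computation in its place, with reg 100 as the
   destination.  */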
4541 | static void | |
4542 | find_moveable_pseudos (void) | |
4543 | { | |
4544 | unsigned i; | |
4545 | int max_regs = max_reg_num (); | |
4546 | int max_uid = get_max_uid (); | |
4547 | basic_block bb; | |
4548 | int *uid_luid = XNEWVEC (int, max_uid); | |
070a1983 | 4549 | rtx_insn **closest_uses = XNEWVEC (rtx_insn *, max_regs); |
acf41a74 | 4550 | /* A set of registers which are live but not modified throughout a block. */ |
8b1c6fd7 DM |
4551 | bitmap_head *bb_transp_live = XNEWVEC (bitmap_head, |
4552 | last_basic_block_for_fn (cfun)); | |
acf41a74 | 4553 | /* A set of registers which only exist in a given basic block. */ |
8b1c6fd7 DM |
4554 | bitmap_head *bb_local = XNEWVEC (bitmap_head, |
4555 | last_basic_block_for_fn (cfun)); | |
acf41a74 BS |
4556 | /* A set of registers which are set once, in an instruction that can be |
4557 | moved freely downwards, but are otherwise transparent to a block. */ | |
8b1c6fd7 DM |
4558 | bitmap_head *bb_moveable_reg_sets = XNEWVEC (bitmap_head, |
4559 | last_basic_block_for_fn (cfun)); | |
8f9b31f7 | 4560 | auto_bitmap live, used, set, interesting, unusable_as_input; |
acf41a74 | 4561 | bitmap_iterator bi; |
acf41a74 BS |
4562 | |
4563 | first_moveable_pseudo = max_regs; | |
9771b263 DN |
4564 | pseudo_replaced_reg.release (); |
4565 | pseudo_replaced_reg.safe_grow_cleared (max_regs); | |
acf41a74 | 4566 | |
2d73cc45 MJ |
4567 | df_analyze (); |
4568 | calculate_dominance_info (CDI_DOMINATORS); | |
4569 | ||
acf41a74 | 4570 | i = 0; |
11cd3bed | 4571 | FOR_EACH_BB_FN (bb, cfun) |
acf41a74 | 4572 | { |
070a1983 | 4573 | rtx_insn *insn; |
acf41a74 BS |
4574 | bitmap transp = bb_transp_live + bb->index; |
4575 | bitmap moveable = bb_moveable_reg_sets + bb->index; | |
4576 | bitmap local = bb_local + bb->index; | |
4577 | ||
4578 | bitmap_initialize (local, 0); | |
4579 | bitmap_initialize (transp, 0); | |
4580 | bitmap_initialize (moveable, 0); | |
8f9b31f7 TS |
4581 | bitmap_copy (live, df_get_live_out (bb)); |
4582 | bitmap_and_into (live, df_get_live_in (bb)); | |
4583 | bitmap_copy (transp, live); | |
acf41a74 | 4584 | bitmap_clear (moveable); |
8f9b31f7 TS |
4585 | bitmap_clear (live); |
4586 | bitmap_clear (used); | |
4587 | bitmap_clear (set); | |
acf41a74 BS |
4588 | FOR_BB_INSNS (bb, insn) |
4589 | if (NONDEBUG_INSN_P (insn)) | |
4590 | { | |
bfac633a | 4591 | df_insn_info *insn_info = DF_INSN_INFO_GET (insn); |
bfac633a | 4592 | df_ref def, use; |
acf41a74 BS |
4593 | |
4594 | uid_luid[INSN_UID (insn)] = i++; | |
4595 | ||
74e59b6c RS |
4596 | def = df_single_def (insn_info); |
4597 | use = df_single_use (insn_info); | |
4598 | if (use | |
4599 | && def | |
4600 | && DF_REF_REGNO (use) == DF_REF_REGNO (def) | |
8f9b31f7 | 4601 | && !bitmap_bit_p (set, DF_REF_REGNO (use)) |
acf41a74 BS |
4602 | && rtx_moveable_p (&PATTERN (insn), OP_IN)) |
4603 | { | |
74e59b6c | 4604 | unsigned regno = DF_REF_REGNO (use); |
acf41a74 | 4605 | bitmap_set_bit (moveable, regno); |
8f9b31f7 TS |
4606 | bitmap_set_bit (set, regno); |
4607 | bitmap_set_bit (used, regno); | |
acf41a74 BS |
4608 | bitmap_clear_bit (transp, regno); |
4609 | continue; | |
4610 | } | |
bfac633a | 4611 | FOR_EACH_INSN_INFO_USE (use, insn_info) |
acf41a74 | 4612 | { |
bfac633a | 4613 | unsigned regno = DF_REF_REGNO (use); |
8f9b31f7 | 4614 | bitmap_set_bit (used, regno); |
acf41a74 BS |
4615 | if (bitmap_clear_bit (moveable, regno)) |
4616 | bitmap_clear_bit (transp, regno); | |
acf41a74 BS |
4617 | } |
4618 | ||
bfac633a | 4619 | FOR_EACH_INSN_INFO_DEF (def, insn_info) |
acf41a74 | 4620 | { |
bfac633a | 4621 | unsigned regno = DF_REF_REGNO (def); |
8f9b31f7 | 4622 | bitmap_set_bit (set, regno); |
acf41a74 BS |
4623 | bitmap_clear_bit (transp, regno); |
4624 | bitmap_clear_bit (moveable, regno); | |
acf41a74 BS |
4625 | } |
4626 | } | |
4627 | } | |
4628 | ||
11cd3bed | 4629 | FOR_EACH_BB_FN (bb, cfun) |
acf41a74 BS |
4630 | { |
4631 | bitmap local = bb_local + bb->index; | |
070a1983 | 4632 | rtx_insn *insn; |
acf41a74 BS |
4633 | |
4634 | FOR_BB_INSNS (bb, insn) | |
4635 | if (NONDEBUG_INSN_P (insn)) | |
4636 | { | |
74e59b6c | 4637 | df_insn_info *insn_info = DF_INSN_INFO_GET (insn); |
070a1983 DM |
4638 | rtx_insn *def_insn; |
4639 | rtx closest_use, note; | |
74e59b6c | 4640 | df_ref def, use; |
acf41a74 BS |
4641 | unsigned regno; |
4642 | bool all_dominated, all_local; | |
ef4bddc2 | 4643 | machine_mode mode; |
acf41a74 | 4644 | |
74e59b6c | 4645 | def = df_single_def (insn_info); |
acf41a74 | 4646 | /* There must be exactly one def in this insn. */ |
74e59b6c | 4647 | if (!def || !single_set (insn)) |
acf41a74 BS |
4648 | continue; |
4649 | /* This must be the only definition of the reg. We also limit | |
4650 | which modes we deal with so that we can assume we can generate | |
4651 | move instructions. */ | |
4652 | regno = DF_REF_REGNO (def); | |
4653 | mode = GET_MODE (DF_REF_REG (def)); | |
4654 | if (DF_REG_DEF_COUNT (regno) != 1 | |
4655 | || !DF_REF_INSN_INFO (def) | |
4656 | || HARD_REGISTER_NUM_P (regno) | |
aa44c80c | 4657 | || DF_REG_EQ_USE_COUNT (regno) > 0 |
acf41a74 BS |
4658 | || (!INTEGRAL_MODE_P (mode) && !FLOAT_MODE_P (mode))) |
4659 | continue; | |
4660 | def_insn = DF_REF_INSN (def); | |
4661 | ||
4662 | for (note = REG_NOTES (def_insn); note; note = XEXP (note, 1)) | |
4663 | if (REG_NOTE_KIND (note) == REG_EQUIV && MEM_P (XEXP (note, 0))) | |
4664 | break; | |
4665 | ||
4666 | if (note) | |
4667 | { | |
4668 | if (dump_file) | |
4669 | fprintf (dump_file, "Ignoring reg %d, has equiv memory\n", | |
4670 | regno); | |
8f9b31f7 | 4671 | bitmap_set_bit (unusable_as_input, regno); |
acf41a74 BS |
4672 | continue; |
4673 | } | |
4674 | ||
4675 | use = DF_REG_USE_CHAIN (regno); | |
4676 | all_dominated = true; | |
4677 | all_local = true; | |
4678 | closest_use = NULL_RTX; | |
4679 | for (; use; use = DF_REF_NEXT_REG (use)) | |
4680 | { | |
070a1983 | 4681 | rtx_insn *insn; |
acf41a74 BS |
4682 | if (!DF_REF_INSN_INFO (use)) |
4683 | { | |
4684 | all_dominated = false; | |
4685 | all_local = false; | |
4686 | break; | |
4687 | } | |
4688 | insn = DF_REF_INSN (use); | |
4689 | if (DEBUG_INSN_P (insn)) | |
4690 | continue; | |
4691 | if (BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (def_insn)) | |
4692 | all_local = false; | |
4693 | if (!insn_dominated_by_p (insn, def_insn, uid_luid)) | |
4694 | all_dominated = false; | |
4695 | if (closest_use != insn && closest_use != const0_rtx) | |
4696 | { | |
4697 | if (closest_use == NULL_RTX) | |
4698 | closest_use = insn; | |
4699 | else if (insn_dominated_by_p (closest_use, insn, uid_luid)) | |
4700 | closest_use = insn; | |
4701 | else if (!insn_dominated_by_p (insn, closest_use, uid_luid)) | |
4702 | closest_use = const0_rtx; | |
4703 | } | |
4704 | } | |
4705 | if (!all_dominated) | |
4706 | { | |
4707 | if (dump_file) | |
4708 | fprintf (dump_file, "Reg %d not all uses dominated by set\n", | |
4709 | regno); | |
4710 | continue; | |
4711 | } | |
4712 | if (all_local) | |
4713 | bitmap_set_bit (local, regno); | |
4714 | if (closest_use == const0_rtx || closest_use == NULL | |
4715 | || next_nonnote_nondebug_insn (def_insn) == closest_use) | |
4716 | { | |
4717 | if (dump_file) | |
4718 | fprintf (dump_file, "Reg %d uninteresting%s\n", regno, | |
4719 | closest_use == const0_rtx || closest_use == NULL | |
4720 | ? " (no unique first use)" : ""); | |
4721 | continue; | |
4722 | } | |
058eb3b0 | 4723 | if (HAVE_cc0 && reg_referenced_p (cc0_rtx, PATTERN (closest_use))) |
acf41a74 BS |
4724 | { |
4725 | if (dump_file) | |
4726 | fprintf (dump_file, "Reg %d: closest user uses cc0\n", | |
4727 | regno); | |
4728 | continue; | |
4729 | } | |
058eb3b0 | 4730 | |
8f9b31f7 | 4731 | bitmap_set_bit (interesting, regno); |
070a1983 DM |
4732 | /* If we get here, we know closest_use is a non-NULL insn |
4733 | (as opposed to const_0_rtx). */ | |
4734 | closest_uses[regno] = as_a <rtx_insn *> (closest_use); | |
acf41a74 BS |
4735 | |
4736 | if (dump_file && (all_local || all_dominated)) | |
4737 | { | |
4738 | fprintf (dump_file, "Reg %u:", regno); | |
4739 | if (all_local) | |
4740 | fprintf (dump_file, " local to bb %d", bb->index); | |
4741 | if (all_dominated) | |
4742 | fprintf (dump_file, " def dominates all uses"); | |
4743 | if (closest_use != const0_rtx) | |
4744 | fprintf (dump_file, " has unique first use"); | |
4745 | fputs ("\n", dump_file); | |
4746 | } | |
4747 | } | |
4748 | } | |
4749 | ||
8f9b31f7 | 4750 | EXECUTE_IF_SET_IN_BITMAP (interesting, 0, i, bi) |
acf41a74 BS |
4751 | { |
4752 | df_ref def = DF_REG_DEF_CHAIN (i); | |
070a1983 | 4753 | rtx_insn *def_insn = DF_REF_INSN (def); |
acf41a74 BS |
4754 | basic_block def_block = BLOCK_FOR_INSN (def_insn); |
4755 | bitmap def_bb_local = bb_local + def_block->index; | |
4756 | bitmap def_bb_moveable = bb_moveable_reg_sets + def_block->index; | |
4757 | bitmap def_bb_transp = bb_transp_live + def_block->index; | |
4758 | bool local_to_bb_p = bitmap_bit_p (def_bb_local, i); | |
070a1983 | 4759 | rtx_insn *use_insn = closest_uses[i]; |
bfac633a | 4760 | df_ref use; |
acf41a74 BS |
4761 | bool all_ok = true; |
4762 | bool all_transp = true; | |
4763 | ||
4764 | if (!REG_P (DF_REF_REG (def))) | |
4765 | continue; | |
4766 | ||
4767 | if (!local_to_bb_p) | |
4768 | { | |
4769 | if (dump_file) | |
4770 | fprintf (dump_file, "Reg %u not local to one basic block\n", | |
4771 | i); | |
4772 | continue; | |
4773 | } | |
4774 | if (reg_equiv_init (i) != NULL_RTX) | |
4775 | { | |
4776 | if (dump_file) | |
4777 | fprintf (dump_file, "Ignoring reg %u with equiv init insn\n", | |
4778 | i); | |
4779 | continue; | |
4780 | } | |
4781 | if (!rtx_moveable_p (&PATTERN (def_insn), OP_IN)) | |
4782 | { | |
4783 | if (dump_file) | |
4784 | fprintf (dump_file, "Found def insn %d for %d to be not moveable\n", | |
4785 | INSN_UID (def_insn), i); | |
4786 | continue; | |
4787 | } | |
4788 | if (dump_file) | |
4789 | fprintf (dump_file, "Examining insn %d, def for %d\n", | |
4790 | INSN_UID (def_insn), i); | |
bfac633a | 4791 | FOR_EACH_INSN_USE (use, def_insn) |
acf41a74 | 4792 | { |
acf41a74 | 4793 | unsigned regno = DF_REF_REGNO (use); |
8f9b31f7 | 4794 | if (bitmap_bit_p (unusable_as_input, regno)) |
acf41a74 BS |
4795 | { |
4796 | all_ok = false; | |
4797 | if (dump_file) | |
4798 | fprintf (dump_file, " found unusable input reg %u.\n", regno); | |
4799 | break; | |
4800 | } | |
4801 | if (!bitmap_bit_p (def_bb_transp, regno)) | |
4802 | { | |
4803 | if (bitmap_bit_p (def_bb_moveable, regno) | |
4804 | && !control_flow_insn_p (use_insn) | |
618f4073 | 4805 | && (!HAVE_cc0 || !sets_cc0_p (use_insn))) |
acf41a74 BS |
4806 | { |
4807 | if (modified_between_p (DF_REF_REG (use), def_insn, use_insn)) | |
4808 | { | |
070a1983 | 4809 | rtx_insn *x = NEXT_INSN (def_insn); |
acf41a74 BS |
4810 | while (!modified_in_p (DF_REF_REG (use), x)) |
4811 | { | |
4812 | gcc_assert (x != use_insn); | |
4813 | x = NEXT_INSN (x); | |
4814 | } | |
4815 | if (dump_file) | |
4816 | fprintf (dump_file, " input reg %u modified but insn %d moveable\n", | |
4817 | regno, INSN_UID (x)); | |
4818 | emit_insn_after (PATTERN (x), use_insn); | |
4819 | set_insn_deleted (x); | |
4820 | } | |
4821 | else | |
4822 | { | |
4823 | if (dump_file) | |
4824 | fprintf (dump_file, " input reg %u modified between def and use\n", | |
4825 | regno); | |
4826 | all_transp = false; | |
4827 | } | |
4828 | } | |
4829 | else | |
4830 | all_transp = false; | |
4831 | } | |
acf41a74 BS |
4832 | } |
4833 | if (!all_ok) | |
4834 | continue; | |
4835 | if (!dbg_cnt (ira_move)) | |
4836 | break; | |
4837 | if (dump_file) | |
4838 | fprintf (dump_file, " all ok%s\n", all_transp ? " and transp" : ""); | |
4839 | ||
4840 | if (all_transp) | |
4841 | { | |
4842 | rtx def_reg = DF_REF_REG (def); | |
4843 | rtx newreg = ira_create_new_reg (def_reg); | |
9e3de74c | 4844 | if (validate_change (def_insn, DF_REF_REAL_LOC (def), newreg, 0)) |
acf41a74 BS |
4845 | { |
4846 | unsigned nregno = REGNO (newreg); | |
a36b2706 | 4847 | emit_insn_before (gen_move_insn (def_reg, newreg), use_insn); |
acf41a74 | 4848 | nregno -= max_regs; |
9771b263 | 4849 | pseudo_replaced_reg[nregno] = def_reg; |
acf41a74 BS |
4850 | } |
4851 | } | |
4852 | } | |
4853 | ||
11cd3bed | 4854 | FOR_EACH_BB_FN (bb, cfun) |
acf41a74 BS |
4855 | { |
4856 | bitmap_clear (bb_local + bb->index); | |
4857 | bitmap_clear (bb_transp_live + bb->index); | |
4858 | bitmap_clear (bb_moveable_reg_sets + bb->index); | |
4859 | } | |
acf41a74 BS |
4860 | free (uid_luid); |
4861 | free (closest_uses); | |
4862 | free (bb_local); | |
4863 | free (bb_transp_live); | |
4864 | free (bb_moveable_reg_sets); | |
4865 | ||
4866 | last_moveable_pseudo = max_reg_num (); | |
2d73cc45 MJ |
4867 | |
4868 | fix_reg_equiv_init (); | |
4869 | expand_reg_info (); | |
4870 | regstat_free_n_sets_and_refs (); | |
4871 | regstat_free_ri (); | |
4872 | regstat_init_n_sets_and_refs (); | |
4873 | regstat_compute_ri (); | |
4874 | free_dominance_info (CDI_DOMINATORS); | |
732dad8f | 4875 | } |
acf41a74 | 4876 | |
3e749749 MJ |
4877 | /* If the SET pattern SET is an assignment from a hard register to a pseudo which | |
4878 | is live at CALL_DOM (if non-NULL, otherwise this check is omitted), return | |
4879 | the destination. Otherwise return NULL. */ | |
732dad8f MJ |
4880 | |
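/* For example (an illustrative x86-64 pattern): a parameter copy such
   as (set (reg:DI 100) (reg:DI 5 di)) qualifies, provided reg 100 is
   live at CALL_DOM when CALL_DOM is given.  */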
4881 | static rtx | |
3e749749 | 4882 | interesting_dest_for_shprep_1 (rtx set, basic_block call_dom) |
732dad8f | 4883 | { |
732dad8f MJ |
4884 | rtx src = SET_SRC (set); |
4885 | rtx dest = SET_DEST (set); | |
4886 | if (!REG_P (src) || !HARD_REGISTER_P (src) | |
4887 | || !REG_P (dest) || HARD_REGISTER_P (dest) | |
4888 | || (call_dom && !bitmap_bit_p (df_get_live_in (call_dom), REGNO (dest)))) | |
4889 | return NULL; | |
4890 | return dest; | |
4891 | } | |
4892 | ||
df3e3493 | 4893 | /* If insn is interesting for parameter range-splitting shrink-wrapping |
3e749749 MJ |
4894 | preparation, i.e. it is a single set from a hard register to a pseudo, which |
4895 | is live at CALL_DOM (if non-NULL, otherwise this check is omitted), or a | |
4896 | parallel statement with only one such statement, return the destination. | |
4897 | Otherwise return NULL. */ | |
4898 | ||
4899 | static rtx | |
070a1983 | 4900 | interesting_dest_for_shprep (rtx_insn *insn, basic_block call_dom) |
3e749749 MJ |
4901 | { |
4902 | if (!INSN_P (insn)) | |
4903 | return NULL; | |
4904 | rtx pat = PATTERN (insn); | |
4905 | if (GET_CODE (pat) == SET) | |
4906 | return interesting_dest_for_shprep_1 (pat, call_dom); | |
4907 | ||
4908 | if (GET_CODE (pat) != PARALLEL) | |
4909 | return NULL; | |
4910 | rtx ret = NULL; | |
4911 | for (int i = 0; i < XVECLEN (pat, 0); i++) | |
4912 | { | |
4913 | rtx sub = XVECEXP (pat, 0, i); | |
8df47bdf AH |
4914 | if (GET_CODE (sub) == USE |
4915 | || GET_CODE (sub) == CLOBBER | |
4916 | || GET_CODE (sub) == CLOBBER_HIGH) | |
3e749749 MJ |
4917 | continue; |
4918 | if (GET_CODE (sub) != SET | |
4919 | || side_effects_p (sub)) | |
4920 | return NULL; | |
4921 | rtx dest = interesting_dest_for_shprep_1 (sub, call_dom); | |
4922 | if (dest && ret) | |
4923 | return NULL; | |
4924 | if (dest) | |
4925 | ret = dest; | |
4926 | } | |
4927 | return ret; | |
4928 | } | |
4929 | ||
732dad8f MJ |
4930 | /* Split the live ranges of pseudos that are loaded from hard registers in | |
4931 | the first BB, in a BB that dominates all non-sibling calls, if such a BB | |
4932 | can be found and is not in a loop.  Return true if the function has made | |
4933 | any changes. */ | |
4934 | ||
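/* A sketch of the transformation (hypothetical numbers): if the first
   BB contains (set (reg 100) (reg:DI 5 di)) and reg 100 is also used
   in or below the BB dominating all calls, a fresh pseudo is created,
   (set (reg 200) (reg 100)) is emitted at that dominating BB, and the
   dominated uses are rewritten to reg 200, splitting the live range
   across the calls.  */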
4935 | static bool | |
4936 | split_live_ranges_for_shrink_wrap (void) | |
4937 | { | |
4938 | basic_block bb, call_dom = NULL; | |
fefa31b5 | 4939 | basic_block first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); |
070a1983 | 4940 | rtx_insn *insn, *last_interesting_insn = NULL; |
8f9b31f7 | 4941 | auto_bitmap need_new, reachable; |
732dad8f MJ |
4942 | vec<basic_block> queue; |
4943 | ||
a5e022d5 | 4944 | if (!SHRINK_WRAPPING_ENABLED) |
732dad8f MJ |
4945 | return false; |
4946 | ||
0cae8d31 | 4947 | queue.create (n_basic_blocks_for_fn (cfun)); |
732dad8f | 4948 | |
11cd3bed | 4949 | FOR_EACH_BB_FN (bb, cfun) |
732dad8f MJ |
4950 | FOR_BB_INSNS (bb, insn) |
4951 | if (CALL_P (insn) && !SIBLING_CALL_P (insn)) | |
4952 | { | |
4953 | if (bb == first) | |
4954 | { | |
732dad8f MJ |
4955 | queue.release (); |
4956 | return false; | |
4957 | } | |
4958 | ||
8f9b31f7 TS |
4959 | bitmap_set_bit (need_new, bb->index); |
4960 | bitmap_set_bit (reachable, bb->index); | |
732dad8f MJ |
4961 | queue.quick_push (bb); |
4962 | break; | |
4963 | } | |
4964 | ||
4965 | if (queue.is_empty ()) | |
4966 | { | |
732dad8f MJ |
4967 | queue.release (); |
4968 | return false; | |
4969 | } | |
4970 | ||
4971 | while (!queue.is_empty ()) | |
4972 | { | |
4973 | edge e; | |
4974 | edge_iterator ei; | |
4975 | ||
4976 | bb = queue.pop (); | |
4977 | FOR_EACH_EDGE (e, ei, bb->succs) | |
fefa31b5 | 4978 | if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun) |
8f9b31f7 | 4979 | && bitmap_set_bit (reachable, e->dest->index)) |
732dad8f MJ |
4980 | queue.quick_push (e->dest); |
4981 | } | |
4982 | queue.release (); | |
4983 | ||
4984 | FOR_BB_INSNS (first, insn) | |
4985 | { | |
4986 | rtx dest = interesting_dest_for_shprep (insn, NULL); | |
4987 | if (!dest) | |
4988 | continue; | |
4989 | ||
4990 | if (DF_REG_DEF_COUNT (REGNO (dest)) > 1) | |
8f9b31f7 | 4991 | return false; |
732dad8f MJ |
4992 | |
4993 | for (df_ref use = DF_REG_USE_CHAIN (REGNO(dest)); | |
4994 | use; | |
4995 | use = DF_REF_NEXT_REG (use)) | |
4996 | { | |
732dad8f | 4997 | int ubbi = DF_REF_BB (use)->index; |
8f9b31f7 TS |
4998 | if (bitmap_bit_p (reachable, ubbi)) |
4999 | bitmap_set_bit (need_new, ubbi); | |
732dad8f MJ |
5000 | } |
5001 | last_interesting_insn = insn; | |
5002 | } | |
5003 | ||
732dad8f | 5004 | if (!last_interesting_insn) |
8f9b31f7 | 5005 | return false; |
732dad8f | 5006 | |
8f9b31f7 | 5007 | call_dom = nearest_common_dominator_for_set (CDI_DOMINATORS, need_new); |
732dad8f MJ |
5008 | if (call_dom == first) |
5009 | return false; | |
5010 | ||
5011 | loop_optimizer_init (AVOID_CFG_MODIFICATIONS); | |
5012 | while (bb_loop_depth (call_dom) > 0) | |
5013 | call_dom = get_immediate_dominator (CDI_DOMINATORS, call_dom); | |
5014 | loop_optimizer_finalize (); | |
5015 | ||
5016 | if (call_dom == first) | |
5017 | return false; | |
5018 | ||
5019 | calculate_dominance_info (CDI_POST_DOMINATORS); | |
5020 | if (dominated_by_p (CDI_POST_DOMINATORS, first, call_dom)) | |
5021 | { | |
5022 | free_dominance_info (CDI_POST_DOMINATORS); | |
5023 | return false; | |
5024 | } | |
5025 | free_dominance_info (CDI_POST_DOMINATORS); | |
5026 | ||
5027 | if (dump_file) | |
5028 | fprintf (dump_file, "Will split live ranges of parameters at BB %i\n", | |
5029 | call_dom->index); | |
5030 | ||
5031 | bool ret = false; | |
5032 | FOR_BB_INSNS (first, insn) | |
5033 | { | |
5034 | rtx dest = interesting_dest_for_shprep (insn, call_dom); | |
bcb21886 | 5035 | if (!dest || dest == pic_offset_table_rtx) |
732dad8f MJ |
5036 | continue; |
5037 | ||
fd1ca3fe | 5038 | bool need_newreg = false; |
732dad8f | 5039 | df_ref use, next; |
9e3de74c | 5040 | for (use = DF_REG_USE_CHAIN (REGNO (dest)); use; use = next) |
732dad8f | 5041 | { |
070a1983 | 5042 | rtx_insn *uin = DF_REF_INSN (use); |
732dad8f MJ |
5043 | next = DF_REF_NEXT_REG (use); |
5044 | ||
fd1ca3fe SB |
5045 | if (DEBUG_INSN_P (uin)) |
5046 | continue; | |
5047 | ||
732dad8f MJ |
5048 | basic_block ubb = BLOCK_FOR_INSN (uin); |
5049 | if (ubb == call_dom | |
5050 | || dominated_by_p (CDI_DOMINATORS, ubb, call_dom)) | |
5051 | { | |
fd1ca3fe SB |
5052 | need_newreg = true; |
5053 | break; | |
732dad8f MJ |
5054 | } |
5055 | } | |
5056 | ||
fd1ca3fe | 5057 | if (need_newreg) |
732dad8f | 5058 | { |
fd1ca3fe SB |
5059 | rtx newreg = ira_create_new_reg (dest); |
5060 | ||
5061 | for (use = DF_REG_USE_CHAIN (REGNO (dest)); use; use = next) | |
5062 | { | |
5063 | rtx_insn *uin = DF_REF_INSN (use); | |
5064 | next = DF_REF_NEXT_REG (use); | |
5065 | ||
5066 | basic_block ubb = BLOCK_FOR_INSN (uin); | |
5067 | if (ubb == call_dom | |
5068 | || dominated_by_p (CDI_DOMINATORS, ubb, call_dom)) | |
5069 | validate_change (uin, DF_REF_REAL_LOC (use), newreg, true); | |
5070 | } | |
5071 | ||
1476d1bd | 5072 | rtx_insn *new_move = gen_move_insn (newreg, dest); |
732dad8f MJ |
5073 | emit_insn_after (new_move, bb_note (call_dom)); |
5074 | if (dump_file) | |
5075 | { | |
5076 | fprintf (dump_file, "Split live-range of register "); | |
5077 | print_rtl_single (dump_file, dest); | |
5078 | } | |
5079 | ret = true; | |
5080 | } | |
5081 | ||
5082 | if (insn == last_interesting_insn) | |
5083 | break; | |
5084 | } | |
5085 | apply_change_group (); | |
5086 | return ret; | |
acf41a74 | 5087 | } |
8ff49c29 | 5088 | |
acf41a74 BS |
5089 | /* Perform the second half of the transformation started in |
5090 | find_moveable_pseudos. We look for instances where the newly introduced | |
5091 | pseudo remains unallocated, and remove it by moving the definition to | |
5092 | just before its use, replacing the move instruction generated by | |
5093 | find_moveable_pseudos. */ | |
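/* Continuing the sketch before find_moveable_pseudos (hypothetical
   register numbers): if (reg 200) received no hard register, the move
   (set (reg 100) (reg 200)) is deleted and the original computation is
   re-emitted in its place with reg 100 as the destination.  */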
5094 | static void | |
5095 | move_unallocated_pseudos (void) | |
5096 | { | |
5097 | int i; | |
5098 | for (i = first_moveable_pseudo; i < last_moveable_pseudo; i++) | |
5099 | if (reg_renumber[i] < 0) | |
5100 | { | |
acf41a74 | 5101 | int idx = i - first_moveable_pseudo; |
9771b263 | 5102 | rtx other_reg = pseudo_replaced_reg[idx]; |
070a1983 | 5103 | rtx_insn *def_insn = DF_REF_INSN (DF_REG_DEF_CHAIN (i)); |
a36b2706 RS |
5104 | /* The use must follow all definitions of OTHER_REG, so we can |
5105 | insert the new definition immediately after any of them. */ | |
5106 | df_ref other_def = DF_REG_DEF_CHAIN (REGNO (other_reg)); | |
070a1983 DM |
5107 | rtx_insn *move_insn = DF_REF_INSN (other_def); |
5108 | rtx_insn *newinsn = emit_insn_after (PATTERN (def_insn), move_insn); | |
a36b2706 | 5109 | rtx set; |
acf41a74 BS |
5110 | int success; |
5111 | ||
5112 | if (dump_file) | |
5113 | fprintf (dump_file, "moving def of %d (insn %d now) ", | |
5114 | REGNO (other_reg), INSN_UID (def_insn)); | |
5115 | ||
a36b2706 RS |
5116 | delete_insn (move_insn); |
5117 | while ((other_def = DF_REG_DEF_CHAIN (REGNO (other_reg)))) | |
5118 | delete_insn (DF_REF_INSN (other_def)); | |
5119 | delete_insn (def_insn); | |
5120 | ||
acf41a74 BS |
5121 | set = single_set (newinsn); |
5122 | success = validate_change (newinsn, &SET_DEST (set), other_reg, 0); | |
5123 | gcc_assert (success); | |
5124 | if (dump_file) | |
5125 | fprintf (dump_file, " %d) rather than keep unallocated replacement %d\n", | |
5126 | INSN_UID (newinsn), i); | |
acf41a74 BS |
5127 | SET_REG_N_REFS (i, 0); |
5128 | } | |
5129 | } | |
f2034d06 | 5130 | \f |
6399c0ab SB |
5131 | /* If the backend knows where to allocate pseudos for hard |
5132 | register initial values, register these allocations now. */ | |
a932fb89 | 5133 | static void |
6399c0ab SB |
5134 | allocate_initial_values (void) |
5135 | { | |
5136 | if (targetm.allocate_initial_value) | |
5137 | { | |
5138 | rtx hreg, preg, x; | |
5139 | int i, regno; | |
5140 | ||
5141 | for (i = 0; HARD_REGISTER_NUM_P (i); i++) | |
5142 | { | |
5143 | if (! initial_value_entry (i, &hreg, &preg)) | |
5144 | break; | |
5145 | ||
5146 | x = targetm.allocate_initial_value (hreg); | |
5147 | regno = REGNO (preg); | |
5148 | if (x && REG_N_SETS (regno) <= 1) | |
5149 | { | |
5150 | if (MEM_P (x)) | |
5151 | reg_equiv_memory_loc (regno) = x; | |
5152 | else | |
5153 | { | |
5154 | basic_block bb; | |
5155 | int new_regno; | |
5156 | ||
5157 | gcc_assert (REG_P (x)); | |
5158 | new_regno = REGNO (x); | |
5159 | reg_renumber[regno] = new_regno; | |
5160 | /* Poke the regno right into regno_reg_rtx so that even | |
5161 | fixed regs are accepted. */ | |
5162 | SET_REGNO (preg, new_regno); | |
5163 | /* Update global register liveness information. */ | |
11cd3bed | 5164 | FOR_EACH_BB_FN (bb, cfun) |
6399c0ab | 5165 | { |
c3284718 | 5166 | if (REGNO_REG_SET_P (df_get_live_in (bb), regno)) |
6399c0ab | 5167 | SET_REGNO_REG_SET (df_get_live_in (bb), new_regno); |
c3284718 | 5168 | if (REGNO_REG_SET_P (df_get_live_out (bb), regno)) |
6399c0ab SB |
5169 | SET_REGNO_REG_SET (df_get_live_out (bb), new_regno); |
5170 | } | |
5171 | } | |
5172 | } | |
5173 | } | |
2af2dbdc | 5174 | |
6399c0ab SB |
5175 | gcc_checking_assert (! initial_value_entry (FIRST_PSEUDO_REGISTER, |
5176 | &hreg, &preg)); | |
5177 | } | |
5178 | } | |
5179 | \f | |
55a2c322 VM |
5180 | |
5181 | /* True when we use LRA instead of the reload pass for the current | |
5182 | function. */ | |
5183 | bool ira_use_lra_p; | |
5184 | ||
311aab06 VM |
5185 | /* True if we have allocno conflicts. It is false for non-optimized |
5186 | mode or when the conflict table is too big. */ | |
5187 | bool ira_conflicts_p; | |
5188 | ||
ae2b9cb6 BS |
5189 | /* Saved between IRA and reload. */ |
5190 | static int saved_flag_ira_share_spill_slots; | |
5191 | ||
058e97ec VM |
5192 | /* This is the main entry of IRA. */ |
5193 | static void | |
5194 | ira (FILE *f) | |
5195 | { | |
058e97ec | 5196 | bool loops_p; |
70cc3288 | 5197 | int ira_max_point_before_emit; |
55a2c322 VM |
5198 | bool saved_flag_caller_saves = flag_caller_saves; |
5199 | enum ira_region saved_flag_ira_region = flag_ira_region; | |
891f31f9 AK |
5200 | unsigned int i; |
5201 | int num_used_regs = 0; | |
55a2c322 | 5202 | |
62869a1c RB |
5203 | clear_bb_flags (); |
5204 | ||
0064f49e WD |
5205 | /* Determine if the current function is a leaf before running IRA |
5206 | since this can impact optimizations done by the prologue and | |
5207 | epilogue, thus changing register elimination offsets. | |
5208 | Other target callbacks may use crtl->is_leaf too, including | |
5209 | SHRINK_WRAPPING_ENABLED, so initialize as early as possible. */ | |
5210 | crtl->is_leaf = leaf_function_p (); | |
5211 | ||
bcb21886 KY |
5212 | /* Perform target specific PIC register initialization. */ |
5213 | targetm.init_pic_reg (); | |
5214 | ||
55a2c322 VM |
5215 | ira_conflicts_p = optimize > 0; |
5216 | ||
891f31f9 AK |
5217 | /* Determine the number of pseudos actually requiring coloring. */ |
5218 | for (i = FIRST_PSEUDO_REGISTER; i < DF_REG_SIZE (df); i++) | |
5219 | num_used_regs += !!(DF_REG_USE_COUNT (i) + DF_REG_DEF_COUNT (i)); | |
5220 | ||
55a2c322 VM |
5221 | /* If there are too many pseudos and/or basic blocks (e.g. 10K |
5222 | pseudos and 10K blocks or 100K pseudos and 1K blocks), we will | |
5223 | use simplified and faster algorithms in LRA. */ | |
5224 | lra_simple_p | |
8b1c6fd7 | 5225 | = (ira_use_lra_p |
891f31f9 AK |
5226 | && num_used_regs >= (1 << 26) / last_basic_block_for_fn (cfun)); |
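/* E.g. with 10,000 basic blocks the threshold is (1 << 26) / 10,000,
   i.e. roughly 6,700 actually-used pseudos.  */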
5227 | ||
55a2c322 VM |
5228 | if (lra_simple_p) |
5229 | { | |
5230 | /* This permits LRA to skip live range splitting. */ | |
5231 | flag_caller_saves = false; | |
5232 | /* There is no point in doing regional allocation when we use | |
5233 | the simplified LRA. */ | |
5234 | flag_ira_region = IRA_REGION_ONE; | |
5235 | ira_conflicts_p = false; | |
5236 | } | |
5237 | ||
5238 | #ifndef IRA_NO_OBSTACK | |
5239 | gcc_obstack_init (&ira_obstack); | |
5240 | #endif | |
5241 | bitmap_obstack_initialize (&ira_bitmap_obstack); | |
058e97ec | 5242 | |
001010df KC |
5243 | /* LRA uses its own infrastructure to handle caller save registers. */ |
5244 | if (flag_caller_saves && !ira_use_lra_p) | |
dc12b70e JZ |
5245 | init_caller_save (); |
5246 | ||
058e97ec VM |
5247 | if (flag_ira_verbose < 10) |
5248 | { | |
5249 | internal_flag_ira_verbose = flag_ira_verbose; | |
5250 | ira_dump_file = f; | |
5251 | } | |
5252 | else | |
5253 | { | |
5254 | internal_flag_ira_verbose = flag_ira_verbose - 10; | |
5255 | ira_dump_file = stderr; | |
5256 | } | |
5257 | ||
5258 | setup_prohibited_mode_move_regs (); | |
3b6d1699 | 5259 | decrease_live_ranges_number (); |
058e97ec | 5260 | df_note_add_problem (); |
5d517141 SB |
5261 | |
5262 | /* DF_LIVE can't be used in the register allocator, too many other | |
5263 | parts of the compiler depend on using the "classic" liveness | |
5264 | interpretation of the DF_LR problem. See PR38711. | |
5265 | Remove the problem, so that we don't spend time updating it in | |
5266 | any of the df_analyze() calls during IRA/LRA. */ | |
5267 | if (optimize > 1) | |
5268 | df_remove_problem (df_live); | |
5269 | gcc_checking_assert (df_live == NULL); | |
5270 | ||
b2b29377 MM |
5271 | if (flag_checking) |
5272 | df->changeable_flags |= DF_VERIFY_SCHEDULED; | |
5273 | ||
058e97ec | 5274 | df_analyze (); |
3b6d1699 | 5275 | |
2d73cc45 MJ |
5276 | init_reg_equiv (); |
5277 | if (ira_conflicts_p) | |
5278 | { | |
5279 | calculate_dominance_info (CDI_DOMINATORS); | |
5280 | ||
5281 | if (split_live_ranges_for_shrink_wrap ()) | |
5282 | df_analyze (); | |
5283 | ||
5284 | free_dominance_info (CDI_DOMINATORS); | |
5285 | } | |
5286 | ||
058e97ec | 5287 | df_clear_flags (DF_NO_INSN_RESCAN); |
2d73cc45 | 5288 | |
ba52669f AM |
5289 | indirect_jump_optimize (); |
5290 | if (delete_trivially_dead_insns (get_insns (), max_reg_num ())) | |
5291 | df_analyze (); | |
5292 | ||
058e97ec VM |
5293 | regstat_init_n_sets_and_refs (); |
5294 | regstat_compute_ri (); | |
5295 | ||
5296 | /* If we are not optimizing, then this is the only place before | |
5297 | register allocation where dataflow is done. And that is needed | |
5298 | to generate these warnings. */ | |
5299 | if (warn_clobbered) | |
5300 | generate_setjmp_warnings (); | |
5301 | ||
1833192f | 5302 | if (resize_reg_info () && flag_ira_loop_pressure) |
b11f0116 | 5303 | ira_set_pseudo_classes (true, ira_dump_file); |
1833192f | 5304 | |
42ae0d7f | 5305 | init_alias_analysis (); |
c38c11a1 | 5306 | loop_optimizer_init (AVOID_CFG_MODIFICATIONS); |
10e04446 | 5307 | reg_equiv = XCNEWVEC (struct equivalence, max_reg_num ()); |
ba52669f | 5308 | update_equiv_regs (); |
10e04446 AM |
5309 | |
5310 | /* Don't move insns if live range shrinkage or register | |
5311 | pressure-sensitive scheduling were done because it will not | |
5312 | improve allocation but likely worsen insn scheduling. */ | |
5313 | if (optimize | |
5314 | && !flag_live_range_shrinkage | |
5315 | && !(flag_sched_pressure && flag_schedule_insns)) | |
5316 | combine_and_move_insns (); | |
5317 | ||
5318 | /* Gather additional equivalences with memory. */ | |
42ae0d7f | 5319 | if (optimize) |
10e04446 AM |
5320 | add_store_equivs (); |
5321 | ||
c38c11a1 | 5322 | loop_optimizer_finalize (); |
f3c82ff9 | 5323 | free_dominance_info (CDI_DOMINATORS); |
42ae0d7f AM |
5324 | end_alias_analysis (); |
5325 | free (reg_equiv); | |
5326 | ||
55a2c322 | 5327 | setup_reg_equiv (); |
10e04446 | 5328 | grow_reg_equivs (); |
55a2c322 | 5329 | setup_reg_equiv_init (); |
058e97ec | 5330 | |
fb99ee9b | 5331 | allocated_reg_info_size = max_reg_num (); |
e8d7e3e7 VM |
5332 | |
5333 | /* It is not worth doing this improvement when we use simple | |
5334 | allocation, either because of -O0 or because the function is too | |
5335 | big. */ | |
5336 | if (ira_conflicts_p) | |
2d73cc45 | 5337 | find_moveable_pseudos (); |
acf41a74 | 5338 | |
fb99ee9b | 5339 | max_regno_before_ira = max_reg_num (); |
8d49e7ef | 5340 | ira_setup_eliminable_regset (); |
b8698a0f | 5341 | |
058e97ec VM |
5342 | ira_overall_cost = ira_reg_cost = ira_mem_cost = 0; |
5343 | ira_load_cost = ira_store_cost = ira_shuffle_cost = 0; | |
5344 | ira_move_loops_num = ira_additional_jumps_num = 0; | |
b8698a0f | 5345 | |
058e97ec | 5346 | ira_assert (current_loops == NULL); |
2608d841 | 5347 | if (flag_ira_region == IRA_REGION_ALL || flag_ira_region == IRA_REGION_MIXED) |
661bc682 | 5348 | loop_optimizer_init (AVOID_CFG_MODIFICATIONS | LOOPS_HAVE_RECORDED_EXITS); |
b8698a0f | 5349 | |
058e97ec VM |
5350 | if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL) |
5351 | fprintf (ira_dump_file, "Building IRA IR\n"); | |
2608d841 | 5352 | loops_p = ira_build (); |
b8698a0f | 5353 | |
311aab06 | 5354 | ira_assert (ira_conflicts_p || !loops_p); |
3553f0bb VM |
5355 | |
5356 | saved_flag_ira_share_spill_slots = flag_ira_share_spill_slots; | |
de8e52f0 | 5357 | if (too_high_register_pressure_p () || cfun->calls_setjmp) |
3553f0bb | 5358 | /* It just wastes the compiler's time to pack spilled pseudos into | |
de8e52f0 VM |
5359 | stack slots in this case -- prohibit it.  We also do this if | |
5360 | there is a setjmp call: for a variable not modified between | |
5361 | setjmp and longjmp, the compiler is required to preserve its | |
5362 | value, and sharing slots does not guarantee that. */ | |
3553f0bb VM |
5363 | flag_ira_share_spill_slots = FALSE; |
5364 | ||
cb1ca6ac | 5365 | ira_color (); |
b8698a0f | 5366 | |
058e97ec | 5367 | ira_max_point_before_emit = ira_max_point; |
b8698a0f | 5368 | |
1756cb66 VM |
5369 | ira_initiate_emit_data (); |
5370 | ||
058e97ec | 5371 | ira_emit (loops_p); |
b8698a0f | 5372 | |
55a2c322 | 5373 | max_regno = max_reg_num (); |
311aab06 | 5374 | if (ira_conflicts_p) |
058e97ec | 5375 | { |
058e97ec | 5376 | if (! loops_p) |
55a2c322 VM |
5377 | { |
5378 | if (! ira_use_lra_p) | |
5379 | ira_initiate_assign (); | |
5380 | } | |
058e97ec VM |
5381 | else |
5382 | { | |
fb99ee9b | 5383 | expand_reg_info (); |
b8698a0f | 5384 | |
55a2c322 VM |
5385 | if (ira_use_lra_p) |
5386 | { | |
5387 | ira_allocno_t a; | |
5388 | ira_allocno_iterator ai; | |
5389 | ||
5390 | FOR_EACH_ALLOCNO (a, ai) | |
9d6e10c7 RL |
5391 | { |
5392 | int old_regno = ALLOCNO_REGNO (a); | |
5393 | int new_regno = REGNO (ALLOCNO_EMIT_DATA (a)->reg); | |
5394 | ||
5395 | ALLOCNO_REGNO (a) = new_regno; | |
5396 | ||
5397 | if (old_regno != new_regno) | |
5398 | setup_reg_classes (new_regno, reg_preferred_class (old_regno), | |
5399 | reg_alternate_class (old_regno), | |
5400 | reg_allocno_class (old_regno)); | |
5401 | } | |
55a2c322 VM |
5402 | } |
5403 | else | |
5404 | { | |
5405 | if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL) | |
5406 | fprintf (ira_dump_file, "Flattening IR\n"); | |
5407 | ira_flattening (max_regno_before_ira, ira_max_point_before_emit); | |
5408 | } | |
058e97ec VM |
5409 | /* New insns were generated: add notes and recalculate live |
5410 | info. */ | |
5411 | df_analyze (); | |
b8698a0f | 5412 | |
544e7e78 SB |
5413 | /* ??? Rebuild the loop tree, but why? Does the loop tree |
5414 | change if new insns were generated? Can that be handled | |
5415 | by updating the loop tree incrementally? */ | |
661bc682 | 5416 | loop_optimizer_finalize (); |
57548aa2 | 5417 | free_dominance_info (CDI_DOMINATORS); |
661bc682 RB |
5418 | loop_optimizer_init (AVOID_CFG_MODIFICATIONS |
5419 | | LOOPS_HAVE_RECORDED_EXITS); | |
058e97ec | 5420 | |
55a2c322 VM |
5421 | if (! ira_use_lra_p) |
5422 | { | |
5423 | setup_allocno_assignment_flags (); | |
5424 | ira_initiate_assign (); | |
5425 | ira_reassign_conflict_allocnos (max_regno); | |
5426 | } | |
058e97ec VM |
5427 | } |
5428 | } | |
5429 | ||
1756cb66 VM |
5430 | ira_finish_emit_data (); |
5431 | ||
058e97ec | 5432 | setup_reg_renumber (); |
b8698a0f | 5433 | |
058e97ec | 5434 | calculate_allocation_cost (); |
b8698a0f | 5435 | |
058e97ec | 5436 | #ifdef ENABLE_IRA_CHECKING |
e5119fab VM |
5437 | if (ira_conflicts_p && ! ira_use_lra_p) |
5438 | /* Unlike the reload pass, LRA does not use any conflict info | |
5439 | from IRA.  We don't rebuild conflict info for LRA (through the | |
67914693 | 5440 | ira_flattening call) and cannot use the check here. We could |
e5119fab VM |
5441 | rebuild this info for LRA in the check mode but there is a risk |
5442 | that code generated with the check and without it will be a bit | |
5443 | different. Calling ira_flattening in any mode would be a | |
5444 | wasting CPU time. So do not check the allocation for LRA. */ | |
058e97ec VM |
5445 | check_allocation (); |
5446 | #endif | |
b8698a0f | 5447 | |
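  /* If ira_emit created new pseudos, the register statistics computed
     before allocation are stale; rebuild them.  */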
  if (max_regno != max_regno_before_ira)
    {
      regstat_free_n_sets_and_refs ();
      regstat_free_ri ();
      regstat_init_n_sets_and_refs ();
      regstat_compute_ri ();
    }

  overall_cost_before = ira_overall_cost;
  if (! ira_conflicts_p)
    grow_reg_equivs ();
  else
    {
      fix_reg_equiv_init ();

#ifdef ENABLE_IRA_CHECKING
      print_redundant_copies ();
#endif
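      /* Classic reload tracks the stack slots of spilled pseudos
	 through this table (see ira_reuse_stack_slot and
	 ira_mark_new_stack_slot); pre-allocate and clear it here.  */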
      if (! ira_use_lra_p)
	{
	  ira_spilled_reg_stack_slots_num = 0;
	  ira_spilled_reg_stack_slots
	    = ((struct ira_spilled_reg_stack_slot *)
	       ira_allocate (max_regno
			     * sizeof (struct ira_spilled_reg_stack_slot)));
	  memset ((void *) ira_spilled_reg_stack_slots, 0,
		  max_regno * sizeof (struct ira_spilled_reg_stack_slot));
	}
    }
  allocate_initial_values ();

  /* See comment for find_moveable_pseudos call.  */
  if (ira_conflicts_p)
    move_unallocated_pseudos ();

  /* Restore original values.  */
  if (lra_simple_p)
    {
      flag_caller_saves = saved_flag_caller_saves;
      flag_ira_region = saved_flag_ira_region;
    }
}

static void
do_reload (void)
{
  basic_block bb;
  bool need_dce;
  unsigned pic_offset_table_regno = INVALID_REGNUM;

  if (flag_ira_verbose < 10)
    ira_dump_file = dump_file;

  /* If pic_offset_table_rtx is a pseudo register, keep it so after
     reload to avoid possible wrong uses of the hard register assigned
     to it.  */
  if (pic_offset_table_rtx
      && REGNO (pic_offset_table_rtx) >= FIRST_PSEUDO_REGISTER)
    pic_offset_table_regno = REGNO (pic_offset_table_rtx);

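  /* Hand over to the chosen post-allocation pass: LRA rewrites the
     insns itself and no longer needs the IRA data structures, while
     classic reload consumes the insn chain built just below.  */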
  timevar_push (TV_RELOAD);
  if (ira_use_lra_p)
    {
      if (current_loops != NULL)
	{
	  loop_optimizer_finalize ();
	  free_dominance_info (CDI_DOMINATORS);
	}
      FOR_ALL_BB_FN (bb, cfun)
	bb->loop_father = NULL;
      current_loops = NULL;

      ira_destroy ();

      lra (ira_dump_file);
      /* ???!!! Move it before lra () when we use ira_reg_equiv in
	 LRA.  */
      vec_free (reg_equivs);
      reg_equivs = NULL;
      need_dce = false;
    }
  else
    {
      df_set_flags (DF_NO_INSN_RESCAN);
      build_insn_chain ();

      need_dce = reload (get_insns (), ira_conflicts_p);
    }

  timevar_pop (TV_RELOAD);

  timevar_push (TV_IRA);

  if (ira_conflicts_p && ! ira_use_lra_p)
    {
      ira_free (ira_spilled_reg_stack_slots);
      ira_finish_assign ();
    }

  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL
      && overall_cost_before != ira_overall_cost)
    fprintf (ira_dump_file, "+++Overall after reload %" PRId64 "\n",
	     ira_overall_cost);

  flag_ira_share_spill_slots = saved_flag_ira_share_spill_slots;

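  /* Classic reload keeps the IRA data structures alive while it runs;
     tear them down now (LRA already did this above).  */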
  if (! ira_use_lra_p)
    {
      ira_destroy ();
      if (current_loops != NULL)
	{
	  loop_optimizer_finalize ();
	  free_dominance_info (CDI_DOMINATORS);
	}
      FOR_ALL_BB_FN (bb, cfun)
	bb->loop_father = NULL;
      current_loops = NULL;

      regstat_free_ri ();
      regstat_free_n_sets_and_refs ();
    }

  if (optimize)
    cleanup_cfg (CLEANUP_EXPENSIVE);

  finish_reg_equiv ();

  bitmap_obstack_release (&ira_bitmap_obstack);
#ifndef IRA_NO_OBSTACK
  obstack_free (&ira_obstack, NULL);
#endif

  /* The code after the reload has changed so much that at this point
     we might as well just rescan everything.  Note that
     df_rescan_all_insns is not going to help here because it does not
     touch the artificial uses and defs.  */
  df_finish_pass (true);
  df_scan_alloc (NULL);
  df_scan_blocks ();

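  /* At higher optimization levels, also maintain the more precise
     DF_LIVE problem and force it to be recomputed from scratch by the
     next df_analyze.  */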
  if (optimize > 1)
    {
      df_live_add_problem ();
      df_live_set_all_dirty ();
    }

  if (optimize)
    df_analyze ();

  if (need_dce && optimize)
    run_fast_dce ();

  /* Diagnose uses of the hard frame pointer when it is used as a global
     register.  Often we can get away with letting the user appropriate
     the frame pointer, but we should let them know when code generation
     makes that impossible.  */
  if (global_regs[HARD_FRAME_POINTER_REGNUM] && frame_pointer_needed)
    {
      tree decl = global_regs_decl[HARD_FRAME_POINTER_REGNUM];
      error_at (DECL_SOURCE_LOCATION (current_function_decl),
		"frame pointer required, but reserved");
      inform (DECL_SOURCE_LOCATION (decl), "for %qD", decl);
    }

  /* If we are doing generic stack checking, give a warning if this
     function's frame size is larger than we expect.  */
  if (flag_stack_check == GENERIC_STACK_CHECK)
    {
      poly_int64 size = get_frame_size () + STACK_CHECK_FIXED_FRAME_SIZE;

      for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (df_regs_ever_live_p (i) && !fixed_regs[i] && call_used_regs[i])
	  size += UNITS_PER_WORD;

      if (constant_lower_bound (size) > STACK_CHECK_MAX_FRAME_SIZE)
	warning (0, "frame size too large for reliable stack checking");
    }

  if (pic_offset_table_regno != INVALID_REGNUM)
    pic_offset_table_rtx = gen_rtx_REG (Pmode, pic_offset_table_regno);

  timevar_pop (TV_IRA);
}
\f
/* Run the integrated register allocator.  */

namespace {

const pass_data pass_data_ira =
{
  RTL_PASS, /* type */
  "ira", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_IRA, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_do_not_ggc_collect, /* todo_flags_finish */
};

class pass_ira : public rtl_opt_pass
{
public:
  pass_ira (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_ira, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return !targetm.no_register_allocation;
    }
  virtual unsigned int execute (function *)
    {
      ira (dump_file);
      return 0;
    }

}; // class pass_ira

} // anon namespace

rtl_opt_pass *
make_pass_ira (gcc::context *ctxt)
{
  return new pass_ira (ctxt);
}
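
/* Both this factory and make_pass_reload below are, like other pass
   factories, presumably invoked by the pass manager when it builds
   the pipeline from passes.def, which lists the two passes roughly
   as:

       NEXT_PASS (pass_ira);
       NEXT_PASS (pass_reload);

   so they always run back to back.  */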

namespace {

const pass_data pass_data_reload =
{
  RTL_PASS, /* type */
  "reload", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_RELOAD, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_reload : public rtl_opt_pass
{
public:
  pass_reload (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_reload, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return !targetm.no_register_allocation;
    }
  virtual unsigned int execute (function *)
    {
      do_reload ();
      return 0;
    }

}; // class pass_reload

} // anon namespace

rtl_opt_pass *
make_pass_reload (gcc::context *ctxt)
{
  return new pass_reload (ctxt);
}