/* Change pseudos to memory.
   Copyright (C) 2010-2016 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* This file contains code for a pass to change spilled pseudos into
   memory.

   The pass creates the necessary stack slots and assigns spilled
   pseudos to the stack slots in the following way:

   for all spilled pseudos P most frequently used first do
     for all stack slots S do
       if P doesn't conflict with pseudos assigned to S then
         assign S to P and go on to process the next pseudo
       end
     end
     create a new stack slot S and assign P to S
   end

   The actual algorithm is a bit more complicated because of the
   different pseudo sizes.

   After that the code replaces spilled pseudos (except the ones
   created from scratches) with the corresponding stack slot memory
   in RTL.

   If at least one stack slot was created, we need to run more passes
   because we have new addresses which should be checked and because
   the old address displacements might change and the address
   constraints (or insn memory constraints) might not be satisfied
   any more.

   For some targets, the pass can spill some pseudos into hard
   registers of a different class (usually into vector registers)
   instead of spilling them into memory, if it is possible and
   profitable.  Spilling GENERAL_REGS pseudos into SSE registers on
   an Intel Core i7 is an example of such an optimization, and it is
   actually recommended by the Intel optimization guide.

   The file also contains code for the final replacement of pseudos
   that got hard registers with those hard registers.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "df.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "output.h"
#include "cfgrtl.h"
#include "lra.h"
#include "lra-int.h"


/* Max regno at the start of the pass.  */
static int regs_num;

/* Map spilled regno -> hard regno used instead of memory for
   spilling.  */
static rtx *spill_hard_reg;

/* The structure describes the stack slot of a spilled pseudo.  */
struct pseudo_slot
{
  /* Number (0, 1, ...) of the stack slot to which the given pseudo
     belongs.  */
  int slot_num;
  /* First or next slot with the same slot number.  */
  struct pseudo_slot *next, *first;
  /* Memory representing the spilled pseudo.  */
  rtx mem;
};

/* The stack slots for each spilled pseudo.  Indexed by regnos.  */
static struct pseudo_slot *pseudo_slots;

/* The structure describes a register or a stack slot which can be
   used for several spilled pseudos.  */
struct slot
{
  /* First pseudo with the given stack slot.  */
  int regno;
  /* Hard reg into which the slot pseudos are spilled.  The value is
     negative for pseudos spilled into memory.  */
  int hard_regno;
  /* Memory representing the whole stack slot.  It can differ from the
     memory representing a pseudo belonging to the given stack slot
     because the pseudo can be placed in a part of the corresponding
     stack slot.  The value is NULL for pseudos spilled into a hard
     reg.  */
  rtx mem;
  /* Combined live ranges of all pseudos belonging to the given slot.
     It is used to figure out whether a new spilled pseudo can use the
     given stack slot.  */
  lra_live_range_t live_ranges;
};

/* Array containing info about the stack slots.  The array element is
   indexed by the stack slot number in the range [0..slots_num).  */
static struct slot *slots;
/* The number of stack slots currently existing.  */
static int slots_num;

/* Set up memory of the spilled pseudo I.  The function can allocate
   the corresponding stack slot if it has not been allocated yet.  */
static void
assign_mem_slot (int i)
{
  rtx x = NULL_RTX;
  machine_mode mode = GET_MODE (regno_reg_rtx[i]);
  unsigned int inherent_size = PSEUDO_REGNO_BYTES (i);
  unsigned int inherent_align = GET_MODE_ALIGNMENT (mode);
  unsigned int max_ref_width = GET_MODE_SIZE (lra_reg_info[i].biggest_mode);
  unsigned int total_size = MAX (inherent_size, max_ref_width);
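  /* Conservative alignment for the slot: the full size in bits of the
     widest reference to the pseudo.  */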
  unsigned int min_align = max_ref_width * BITS_PER_UNIT;
  int adjust = 0;

  lra_assert (regno_reg_rtx[i] != NULL_RTX && REG_P (regno_reg_rtx[i])
	      && lra_reg_info[i].nrefs != 0 && reg_renumber[i] < 0);

  x = slots[pseudo_slots[i].slot_num].mem;

  /* We can use a slot already allocated because it is guaranteed that
     the slot provides both enough inherent space and enough total
     space.  */
  if (x)
    ;
  /* Each pseudo has an inherent size which comes from its own mode,
     and a total size which provides room for paradoxical subregs
     which refer to the pseudo reg in wider modes.  We allocate a new
     slot, making sure that it has enough inherent space and total
     space.  */
  else
    {
      rtx stack_slot;

      /* No known place to spill from => no slot to reuse.  */
      x = assign_stack_local (mode, total_size,
			      min_align > inherent_align
			      || total_size > inherent_size ? -1 : 0);
      stack_slot = x;
      /* Cancel the big-endian correction done in assign_stack_local.
	 Get the address of the beginning of the slot.  This is so we
	 can do a big-endian correction unconditionally below.  */
      if (BYTES_BIG_ENDIAN)
	{
	  adjust = inherent_size - total_size;
	  if (adjust)
	    stack_slot
	      = adjust_address_nv (x,
				   mode_for_size (total_size * BITS_PER_UNIT,
						  MODE_INT, 1),
				   adjust);
	}
      slots[pseudo_slots[i].slot_num].mem = stack_slot;
    }

  /* On a big endian machine, the "address" of the slot is the address
     of the low part that fits its inherent mode.  */
  if (BYTES_BIG_ENDIAN && inherent_size < total_size)
    adjust += (total_size - inherent_size);

  x = adjust_address_nv (x, GET_MODE (regno_reg_rtx[i]), adjust);

  /* Set all of the memory attributes as appropriate for a spill.  */
  set_mem_attrs_for_spill (x);
  pseudo_slots[i].mem = x;
}

/* Sort pseudos according to their usage frequencies.  */
static int
regno_freq_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;
  int diff;

  if ((diff = lra_reg_info[regno2].freq - lra_reg_info[regno1].freq) != 0)
    return diff;
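  /* Tie-break by regno so the sort result is deterministic.  */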
  return regno1 - regno2;
}

/* Sort pseudos according to their slots, putting the slots in the
   order that they should be allocated.  Slots with lower numbers have
   the highest priority and should get the smallest displacement from
   the stack or frame pointer (whichever is being used).

   The first allocated slot is always closest to the frame pointer,
   so prefer lower slot numbers when frame_pointer_needed.  If the
   stack and frame grow in the same direction, then the first
   allocated slot is always closest to the initial stack pointer and
   furthest away from the final stack pointer, so allocate higher
   numbers first when using the stack pointer in that case.  The
   reverse is true if the stack and frame grow in opposite
   directions.  */
static int
pseudo_reg_slot_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;
  int diff, slot_num1, slot_num2;
  int total_size1, total_size2;

  slot_num1 = pseudo_slots[regno1].slot_num;
  slot_num2 = pseudo_slots[regno2].slot_num;
  if ((diff = slot_num1 - slot_num2) != 0)
    return (frame_pointer_needed
	    || (!FRAME_GROWS_DOWNWARD) == STACK_GROWS_DOWNWARD ? diff : -diff);
  total_size1 = GET_MODE_SIZE (lra_reg_info[regno1].biggest_mode);
  total_size2 = GET_MODE_SIZE (lra_reg_info[regno2].biggest_mode);
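  /* Within the same slot, put the pseudos with the biggest total size
     first; the slot memory is created for the first pseudo processed,
     so this guarantees the slot is big enough for all of them.  */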
  if ((diff = total_size2 - total_size1) != 0)
    return diff;
  return regno1 - regno2;
}

/* Assign spill hard registers to N pseudos in PSEUDO_REGNOS, which is
   sorted in order of highest frequency first.  Put the pseudos which
   did not get a spill hard register at the beginning of the array
   PSEUDO_REGNOS.  Return the number of such pseudos.  */
static int
assign_spill_hard_regs (int *pseudo_regnos, int n)
{
  int i, k, p, regno, res, spill_class_size, hard_regno, nr;
  enum reg_class rclass, spill_class;
  machine_mode mode;
  lra_live_range_t r;
  rtx_insn *insn;
  rtx set;
  basic_block bb;
  HARD_REG_SET conflict_hard_regs;
  bitmap_head ok_insn_bitmap;
  bitmap setjump_crosses = regstat_get_setjmp_crosses ();
  /* Hard registers which cannot be used for any purpose at a given
     program point because they are unallocatable or already allocated
     for other pseudos.  */
  HARD_REG_SET *reserved_hard_regs;

  if (! lra_reg_spill_p)
    return n;
  /* Set up reserved hard regs for every program point.  */
  reserved_hard_regs = XNEWVEC (HARD_REG_SET, lra_live_max_point);
  for (p = 0; p < lra_live_max_point; p++)
    COPY_HARD_REG_SET (reserved_hard_regs[p], lra_no_alloc_regs);
  for (i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
    if (lra_reg_info[i].nrefs != 0
	&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
      for (r = lra_reg_info[i].live_ranges; r != NULL; r = r->next)
	for (p = r->start; p <= r->finish; p++)
	  add_to_hard_reg_set (&reserved_hard_regs[p],
			       lra_reg_info[i].biggest_mode, hard_regno);
  bitmap_initialize (&ok_insn_bitmap, &reg_obstack);
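  /* Collect the insns in which a pseudo can be replaced by a spill
     hard reg directly: debug insns and plain reg-to-reg moves.  In
     any other insn a spill hard reg of a different class would likely
     violate the insn constraints.  */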
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      if (DEBUG_INSN_P (insn)
	  || ((set = single_set (insn)) != NULL_RTX
	      && REG_P (SET_SRC (set)) && REG_P (SET_DEST (set))))
	bitmap_set_bit (&ok_insn_bitmap, INSN_UID (insn));
  for (res = i = 0; i < n; i++)
    {
      regno = pseudo_regnos[i];
      rclass = lra_get_allocno_class (regno);
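      /* Keep the pseudo for a memory slot if it crosses a setjmp, if
	 the target provides no spill class for it, or if it occurs in
	 an insn other than a move or debug insn.  */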
      if (bitmap_bit_p (setjump_crosses, regno)
	  || (spill_class
	      = ((enum reg_class)
		 targetm.spill_class ((reg_class_t) rclass,
				      PSEUDO_REGNO_MODE (regno)))) == NO_REGS
	  || bitmap_intersect_compl_p (&lra_reg_info[regno].insn_bitmap,
				       &ok_insn_bitmap))
	{
	  pseudo_regnos[res++] = regno;
	  continue;
	}
      lra_assert (spill_class != NO_REGS);
      COPY_HARD_REG_SET (conflict_hard_regs,
			 lra_reg_info[regno].conflict_hard_regs);
      for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
	for (p = r->start; p <= r->finish; p++)
	  IOR_HARD_REG_SET (conflict_hard_regs, reserved_hard_regs[p]);
      spill_class_size = ira_class_hard_regs_num[spill_class];
      mode = lra_reg_info[regno].biggest_mode;
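      /* Take the first hard reg of the spill class that is free in
	 MODE over the whole live range of the pseudo.  */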
      for (k = 0; k < spill_class_size; k++)
	{
	  hard_regno = ira_class_hard_regs[spill_class][k];
	  if (! overlaps_hard_reg_set_p (conflict_hard_regs, mode, hard_regno))
	    break;
	}
      if (k >= spill_class_size)
	{
	  /* There are no available regs -- assign memory later.  */
	  pseudo_regnos[res++] = regno;
	  continue;
	}
      if (lra_dump_file != NULL)
	fprintf (lra_dump_file, "  Spill r%d into hr%d\n", regno, hard_regno);
      /* Update reserved_hard_regs.  */
      for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
	for (p = r->start; p <= r->finish; p++)
	  add_to_hard_reg_set (&reserved_hard_regs[p],
			       lra_reg_info[regno].biggest_mode, hard_regno);
      spill_hard_reg[regno]
	= gen_raw_REG (PSEUDO_REGNO_MODE (regno), hard_regno);
      for (nr = 0;
	   nr < hard_regno_nregs[hard_regno][lra_reg_info[regno].biggest_mode];
	   nr++)
	/* Just loop.  */
	df_set_regs_ever_live (hard_regno + nr, true);
    }
  bitmap_clear (&ok_insn_bitmap);
  free (reserved_hard_regs);
  return res;
}

/* Add pseudo REGNO to slot SLOT_NUM.  */
static void
add_pseudo_to_slot (int regno, int slot_num)
{
  struct pseudo_slot *first;

  if (slots[slot_num].regno < 0)
    {
      /* It is the first pseudo in the slot.  */
      slots[slot_num].regno = regno;
      pseudo_slots[regno].first = &pseudo_slots[regno];
      pseudo_slots[regno].next = NULL;
    }
  else
    {
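      /* Link the pseudo in right after the first pseudo of the
	 slot.  */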
      first = pseudo_slots[regno].first = &pseudo_slots[slots[slot_num].regno];
      pseudo_slots[regno].next = first->next;
      first->next = &pseudo_slots[regno];
    }
  pseudo_slots[regno].mem = NULL_RTX;
  pseudo_slots[regno].slot_num = slot_num;
  slots[slot_num].live_ranges
    = lra_merge_live_ranges (slots[slot_num].live_ranges,
			     lra_copy_live_range_list
			     (lra_reg_info[regno].live_ranges));
}

/* Assign stack slot numbers to pseudos in array PSEUDO_REGNOS of
   length N.  Sort pseudos in PSEUDO_REGNOS for the subsequent
   assignment of memory stack slots.  */
static void
assign_stack_slot_num_and_sort_pseudos (int *pseudo_regnos, int n)
{
  int i, j, regno;

  slots_num = 0;
  /* Assign stack slot numbers to spilled pseudos, using smaller
     numbers for the most frequently used pseudos.  */
  for (i = 0; i < n; i++)
    {
      regno = pseudo_regnos[i];
      if (! flag_ira_share_spill_slots)
	j = slots_num;
      else
	{
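	  /* Look for an existing memory slot none of whose pseudos'
	     live ranges intersects the live ranges of REGNO.  */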
	  for (j = 0; j < slots_num; j++)
	    if (slots[j].hard_regno < 0
		&& ! (lra_intersected_live_ranges_p
		      (slots[j].live_ranges,
		       lra_reg_info[regno].live_ranges)))
	      break;
	}
      if (j >= slots_num)
	{
	  /* New slot.  */
	  slots[j].live_ranges = NULL;
	  slots[j].regno = slots[j].hard_regno = -1;
	  slots[j].mem = NULL_RTX;
	  slots_num++;
	}
      add_pseudo_to_slot (regno, j);
    }
  /* Sort regnos according to their slot numbers.  */
  qsort (pseudo_regnos, n, sizeof (int), pseudo_reg_slot_compare);
}

/* Recursively process LOC in INSN and change spilled pseudos to the
   corresponding memory or spill hard reg.  Ignore spilled pseudos
   created from scratches.  */
static void
remove_pseudos (rtx *loc, rtx_insn *insn)
{
  int i;
  rtx hard_reg;
  const char *fmt;
  enum rtx_code code;

  if (*loc == NULL_RTX)
    return;
  code = GET_CODE (*loc);
  if (code == REG && (i = REGNO (*loc)) >= FIRST_PSEUDO_REGISTER
      && lra_get_regno_hard_regno (i) < 0
      /* We do not want to assign memory for former scratches because
	 it might result in an address reload for some targets.  In
	 any case we transform such pseudos that did not get hard
	 registers back into scratches.  */
      && ! lra_former_scratch_p (i))
    {
      if ((hard_reg = spill_hard_reg[i]) != NULL_RTX)
	*loc = copy_rtx (hard_reg);
      else
	{
	  rtx x = lra_eliminate_regs_1 (insn, pseudo_slots[i].mem,
					GET_MODE (pseudo_slots[i].mem),
					false, false, 0, true);
	  *loc = x != pseudo_slots[i].mem ? x : copy_rtx (x);
	}
      return;
    }

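  /* Scan all the operand sub-rtxes of *LOC recursively.  */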
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	remove_pseudos (&XEXP (*loc, i), insn);
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (*loc, i) - 1; j >= 0; j--)
	    remove_pseudos (&XVECEXP (*loc, i, j), insn);
	}
    }
}

/* Convert spilled pseudos into their stack slots or spill hard regs,
   and put the insns to process on the constraint stack (that is all
   insns in which pseudos were changed to memory or spill hard
   regs).  */
static void
spill_pseudos (void)
{
  basic_block bb;
  rtx_insn *insn;
  int i;
  bitmap_head spilled_pseudos, changed_insns;

  bitmap_initialize (&spilled_pseudos, &reg_obstack);
  bitmap_initialize (&changed_insns, &reg_obstack);
  for (i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
    {
      if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
	  && ! lra_former_scratch_p (i))
	{
	  bitmap_set_bit (&spilled_pseudos, i);
	  bitmap_ior_into (&changed_insns, &lra_reg_info[i].insn_bitmap);
	}
    }
  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS (bb, insn)
	if (bitmap_bit_p (&changed_insns, INSN_UID (insn)))
	  {
	    rtx *link_loc, link;

	    remove_pseudos (&PATTERN (insn), insn);
	    if (CALL_P (insn))
	      remove_pseudos (&CALL_INSN_FUNCTION_USAGE (insn), insn);
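	    /* Spilled pseudos can also occur in CFA-related notes;
	       rewrite those too.  */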
	    for (link_loc = &REG_NOTES (insn);
		 (link = *link_loc) != NULL_RTX;
		 link_loc = &XEXP (link, 1))
	      {
		switch (REG_NOTE_KIND (link))
		  {
		  case REG_FRAME_RELATED_EXPR:
		  case REG_CFA_DEF_CFA:
		  case REG_CFA_ADJUST_CFA:
		  case REG_CFA_OFFSET:
		  case REG_CFA_REGISTER:
		  case REG_CFA_EXPRESSION:
		  case REG_CFA_RESTORE:
		  case REG_CFA_SET_VDRAP:
		    remove_pseudos (&XEXP (link, 0), insn);
		    break;
		  default:
		    break;
		  }
	      }
	    if (lra_dump_file != NULL)
	      fprintf (lra_dump_file,
		       "Changing spilled pseudos to memory in insn #%u\n",
		       INSN_UID (insn));
	    lra_push_insn (insn);
	    if (lra_reg_spill_p || targetm.different_addr_displacement_p ())
	      lra_set_used_insn_alternative (insn, -1);
	  }
	else if (CALL_P (insn))
	  /* The presence of a pseudo in CALL_INSN_FUNCTION_USAGE does
	     not affect the value of insn_bitmap in the corresponding
	     lra_reg_info.  That is because we don't need to reload
	     pseudos in CALL_INSN_FUNCTION_USAGEs.  So if we processed
	     only the insns in the insn_bitmap of a given pseudo here,
	     we could miss the pseudo in some
	     CALL_INSN_FUNCTION_USAGEs.  */
	  remove_pseudos (&CALL_INSN_FUNCTION_USAGE (insn), insn);
      bitmap_and_compl_into (df_get_live_in (bb), &spilled_pseudos);
      bitmap_and_compl_into (df_get_live_out (bb), &spilled_pseudos);
    }
  bitmap_clear (&spilled_pseudos);
  bitmap_clear (&changed_insns);
}

/* Return true if we need to change some pseudos into memory.  */
bool
lra_need_for_spills_p (void)
{
  int i, max_regno = max_reg_num ();

  for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
	&& ! lra_former_scratch_p (i))
      return true;
  return false;
}

/* Change spilled pseudos into memory or spill hard regs.  Put the
   changed insns on the constraint stack (these insns will be
   considered on the next constraint pass).  The changed insns are
   all insns in which pseudos were changed.  */
void
lra_spill (void)
{
  int i, n, curr_regno;
  int *pseudo_regnos;

  regs_num = max_reg_num ();
  spill_hard_reg = XNEWVEC (rtx, regs_num);
  pseudo_regnos = XNEWVEC (int, regs_num);
  for (n = 0, i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
    if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
	/* We do not want to assign memory for former scratches.  */
	&& ! lra_former_scratch_p (i))
      {
	spill_hard_reg[i] = NULL_RTX;
	pseudo_regnos[n++] = i;
      }
  lra_assert (n > 0);
  pseudo_slots = XNEWVEC (struct pseudo_slot, regs_num);
  slots = XNEWVEC (struct slot, regs_num);
  /* Sort regnos according to their usage frequencies.  */
  qsort (pseudo_regnos, n, sizeof (int), regno_freq_compare);
  n = assign_spill_hard_regs (pseudo_regnos, n);
  assign_stack_slot_num_and_sort_pseudos (pseudo_regnos, n);
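  /* Create the stack slot memory for the pseudos which did not get
     spill hard regs.  */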
  for (i = 0; i < n; i++)
    if (pseudo_slots[pseudo_regnos[i]].mem == NULL_RTX)
      assign_mem_slot (pseudo_regnos[i]);
  if (n > 0 && crtl->stack_alignment_needed)
    /* If we have a stack frame, we must align it now.  The stack size
       may be a part of the offset computation for register
       elimination.  */
    assign_stack_local (BLKmode, 0, crtl->stack_alignment_needed);
  if (lra_dump_file != NULL)
    {
      for (i = 0; i < slots_num; i++)
	{
	  fprintf (lra_dump_file, "  Slot %d regnos (width = %d):", i,
		   GET_MODE_SIZE (GET_MODE (slots[i].mem)));
	  for (curr_regno = slots[i].regno;;
	       curr_regno = pseudo_slots[curr_regno].next - pseudo_slots)
	    {
	      fprintf (lra_dump_file, "  %d", curr_regno);
	      if (pseudo_slots[curr_regno].next == NULL)
		break;
	    }
	  fprintf (lra_dump_file, "\n");
	}
    }
  spill_pseudos ();
  free (slots);
  free (pseudo_slots);
  free (pseudo_regnos);
  free (spill_hard_reg);
}

/* Apply alter_subreg for subregs of regs in *LOC.  Use FINAL_P for
   alter_subreg calls.  Return true if any subreg of a reg was
   processed.  */
static bool
alter_subregs (rtx *loc, bool final_p)
{
  int i;
  rtx x = *loc;
  bool res;
  const char *fmt;
  enum rtx_code code;

  if (x == NULL_RTX)
    return false;
  code = GET_CODE (x);
  if (code == SUBREG && REG_P (SUBREG_REG (x)))
    {
      lra_assert (REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER);
      alter_subreg (loc, final_p);
      return true;
    }
  fmt = GET_RTX_FORMAT (code);
  res = false;
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (alter_subregs (&XEXP (x, i), final_p))
	    res = true;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (alter_subregs (&XVECEXP (x, i, j), final_p))
	      res = true;
	}
    }
  return res;
}

/* Return true if REGNO is used for return in the current
   function.  */
static bool
return_regno_p (unsigned int regno)
{
  rtx outgoing = crtl->return_rtx;

  if (! outgoing)
    return false;

  if (REG_P (outgoing))
    return REGNO (outgoing) == regno;
  else if (GET_CODE (outgoing) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (outgoing, 0); i++)
	{
	  rtx x = XEXP (XVECEXP (outgoing, 0, i), 0);

	  if (REG_P (x) && REGNO (x) == regno)
	    return true;
	}
    }
  return false;
}

/* Make the final change of pseudos that got hard registers into
   those hard registers, and remove temporary clobbers.  */
void
lra_final_code_change (void)
{
  int i, hard_regno;
  basic_block bb;
  rtx_insn *insn, *curr;
  int max_regno = max_reg_num ();

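  /* Each pseudo that got a hard register is represented by the single
     shared rtx regno_reg_rtx[i], so changing its regno here rewrites
     every occurrence in the insn stream at once.  */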
  for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    if (lra_reg_info[i].nrefs != 0
	&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
      SET_REGNO (regno_reg_rtx[i], hard_regno);
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS_SAFE (bb, insn, curr)
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == CLOBBER && LRA_TEMP_CLOBBER_P (pat))
	    {
	      /* Remove clobbers temporarily created in LRA.  We don't
		 need them anymore and don't want to waste compiler
		 time processing them in a few subsequent passes.  */
	      lra_invalidate_insn_data (insn);
	      delete_insn (insn);
	      continue;
	    }

	  /* IRA can generate move insns involving pseudos.  It is
	     better to remove them earlier to speed up the compiler a
	     bit.  It is also better to do it here, as they might not
	     pass the final RTL check in LRA (e.g. an insn moving a
	     control register into itself).  So remove a useless move
	     insn unless the next insn is a USE marking the return reg
	     (we should save such insns, as some subsequent
	     optimizations assume that these original insns are
	     kept).  */
	  if (NONJUMP_INSN_P (insn) && GET_CODE (pat) == SET
	      && REG_P (SET_SRC (pat)) && REG_P (SET_DEST (pat))
	      && REGNO (SET_SRC (pat)) == REGNO (SET_DEST (pat))
	      && ! return_regno_p (REGNO (SET_SRC (pat))))
	    {
	      lra_invalidate_insn_data (insn);
	      delete_insn (insn);
	      continue;
	    }

	  lra_insn_recog_data_t id = lra_get_insn_recog_data (insn);
	  struct lra_insn_reg *reg;

	  for (reg = id->regs; reg != NULL; reg = reg->next)
	    if (reg->regno >= FIRST_PSEUDO_REGISTER
		&& lra_reg_info[reg->regno].nrefs == 0)
	      break;

	  if (reg != NULL)
	    {
	      /* Pseudos can still occur in debug insns in some very
		 rare and complicated cases, e.g. when the pseudo was
		 removed by inheritance and the debug insn is not in
		 the EBBs where the inheritance happened.  It is
		 difficult and time consuming to find what hard
		 register corresponds to the pseudo, so just remove
		 the debug insn.  Another solution could be assigning
		 a hard reg/memory, but it would be misleading info.
		 It is better to have no info than wrong info.  */
	      lra_assert (DEBUG_INSN_P (insn));
	      lra_invalidate_insn_data (insn);
	      delete_insn (insn);
	      continue;
	    }

	  struct lra_static_insn_data *static_id = id->insn_static_data;
	  bool insn_change_p = false;

	  for (i = id->insn_static_data->n_operands - 1; i >= 0; i--)
	    {
	      if (! DEBUG_INSN_P (insn) && static_id->operand[i].is_operator)
		continue;

	      rtx op = *id->operand_loc[i];

	      if (static_id->operand[i].type == OP_OUT
		  && GET_CODE (op) == SUBREG && REG_P (SUBREG_REG (op))
		  && ! LRA_SUBREG_P (op))
		{
		  hard_regno = REGNO (SUBREG_REG (op));
		  /* We cannot always remove sub-registers of
		     hard registers, as we may lose the information
		     that only a part of the register is changed and
		     subsequent optimizations may then do wrong
		     transformations (e.g. dead code elimination).
		     Nor can we keep all sub-registers, as the
		     subsequent optimizations cannot handle all such
		     cases.  Here is a compromise which works.  */
		  if ((GET_MODE_SIZE (GET_MODE (op))
		       < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
		      && (hard_regno_nregs[hard_regno][GET_MODE (SUBREG_REG (op))]
			  == hard_regno_nregs[hard_regno][GET_MODE (op)])
#ifdef STACK_REGS
		      && (hard_regno < FIRST_STACK_REG
			  || hard_regno > LAST_STACK_REG)
#endif
		      )
		    continue;
		}
	      if (alter_subregs (id->operand_loc[i], ! DEBUG_INSN_P (insn)))
		{
		  lra_update_dup (id, i);
		  insn_change_p = true;
		}
	    }
	  if (insn_change_p)
	    lra_update_operator_dups (id);
	}
}