1/* Graph coloring register allocator
2 Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Michael Matz <matz@suse.de>
4 and Daniel Berlin <dan@cgsoftware.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under the
9 terms of the GNU General Public License as published by the Free Software
10 Foundation; either version 2, or (at your option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
15 details.
16
17 You should have received a copy of the GNU General Public License along
18 with GCC; see the file COPYING. If not, write to the Free Software
19 Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
20
21#include "config.h"
22#include "system.h"
23#include "coretypes.h"
24#include "tm.h"
25#include "rtl.h"
26#include "tm_p.h"
27#include "function.h"
28#include "regs.h"
29#include "hard-reg-set.h"
30#include "basic-block.h"
31#include "df.h"
32#include "expr.h"
33#include "output.h"
34#include "except.h"
35#include "ra.h"
36#include "insn-config.h"
37#include "reload.h"
38
39/* This file is part of the graph coloring register allocator, and
40 contains the functions to change the insn stream. I.e. it adds
41 spill code, rewrites insns to use the new registers after
42 coloring and deletes coalesced moves. */
43
44struct rewrite_info;
45struct rtx_list;
46
47static void spill_coalescing (sbitmap, sbitmap);
48static unsigned HOST_WIDE_INT spill_prop_savings (struct web *, sbitmap);
49static void spill_prop_insert (struct web *, sbitmap, sbitmap);
50static int spill_propagation (sbitmap, sbitmap, sbitmap);
51static void spill_coalprop (void);
52static void allocate_spill_web (struct web *);
53static void choose_spill_colors (void);
54static void rewrite_program (bitmap);
55static void remember_slot (struct rtx_list **, rtx);
56static int slots_overlap_p (rtx, rtx);
57static void delete_overlapping_slots (struct rtx_list **, rtx);
58static int slot_member_p (struct rtx_list *, rtx);
59static void insert_stores (bitmap);
60static int spill_same_color_p (struct web *, struct web *);
61static bool is_partly_live_1 (sbitmap, struct web *);
62static void update_spill_colors (HARD_REG_SET *, struct web *, int);
63static int spill_is_free (HARD_REG_SET *, struct web *);
64static void emit_loads (struct rewrite_info *, int, rtx);
65static void reloads_to_loads (struct rewrite_info *, struct ref **,
66 unsigned int, struct web **);
67static void rewrite_program2 (bitmap);
68static void mark_refs_for_checking (struct web *, bitmap);
69static void detect_web_parts_to_rebuild (void);
70static void delete_useless_defs (void);
71static void detect_non_changed_webs (void);
72static void reset_changed_flag (void);
73
74/* For tracking some statistics, we count the number (and cost)
75 of deleted move insns. */
76static unsigned int deleted_move_insns;
77static unsigned HOST_WIDE_INT deleted_move_cost;
78
79/* This is the spill coalescing phase. In SPILLED the IDs of all
 80 already spilled webs are noted. In COALESCE the IDs of webs still
 81 to be checked for coalescing are noted. This tries to coalesce two webs
 82 which were spilled, are connected by a move, and don't conflict. It greatly
 83 reduces memory shuffling. */
84
85static void
86spill_coalescing (sbitmap coalesce, sbitmap spilled)
87{
88 struct move_list *ml;
89 struct move *m;
90 for (ml = wl_moves; ml; ml = ml->next)
91 if ((m = ml->move) != NULL)
92 {
93 struct web *s = alias (m->source_web);
94 struct web *t = alias (m->target_web);
95 if ((TEST_BIT (spilled, s->id) && TEST_BIT (coalesce, t->id))
96 || (TEST_BIT (spilled, t->id) && TEST_BIT (coalesce, s->id)))
97 {
98 struct conflict_link *wl;
99 if (TEST_BIT (sup_igraph, s->id * num_webs + t->id)
100 || TEST_BIT (sup_igraph, t->id * num_webs + s->id)
101 || s->pattern || t->pattern)
102 continue;
103
104 deleted_move_insns++;
105 deleted_move_cost += BLOCK_FOR_INSN (m->insn)->frequency + 1;
106 PUT_CODE (m->insn, NOTE);
107 NOTE_LINE_NUMBER (m->insn) = NOTE_INSN_DELETED;
108 df_insn_modify (df, BLOCK_FOR_INSN (m->insn), m->insn);
109
110 m->target_web->target_of_spilled_move = 1;
111 if (s == t)
112 /* Maybe already coalesced due to a former move. */
113 continue;
114 /* Merge the nodes S and T in the I-graph. Beware: the merging
115 of conflicts relies on the fact that in the conflict list
116 of T all of its conflicts are noted. This is currently not
117 the case if T were the target of a coalesced web, because
118 then (in combine () above) only those conflicts were noted in
119 T from the web which was coalesced into T, which at the time
120 of combine() were not already on the SELECT stack or were
121 themselves coalesced to something else. */
122 if (t->type != SPILLED || s->type != SPILLED)
123 abort ();
124 remove_list (t->dlink, &WEBS(SPILLED));
125 put_web (t, COALESCED);
126 t->alias = s;
127 s->is_coalesced = 1;
128 t->is_coalesced = 1;
129 merge_moves (s, t);
130 for (wl = t->conflict_list; wl; wl = wl->next)
131 {
132 struct web *pweb = wl->t;
133 if (wl->sub == NULL)
134 record_conflict (s, pweb);
135 else
136 {
137 struct sub_conflict *sl;
138 for (sl = wl->sub; sl; sl = sl->next)
139 {
140 struct web *sweb = NULL;
141 if (SUBWEB_P (sl->s))
142 sweb = find_subweb (s, sl->s->orig_x);
143 if (!sweb)
144 sweb = s;
145 record_conflict (sweb, sl->t);
146 }
147 }
148 /* No decrement_degree here, because we already have colored
149 the graph, and don't want to insert pweb into any other
150 list. */
151 pweb->num_conflicts -= 1 + t->add_hardregs;
152 }
153 }
154 }
155}
156
157/* Returns the probable saving of coalescing WEB with webs from
158 SPILLED, in terms of removed move insn cost. */
159
160static unsigned HOST_WIDE_INT
161spill_prop_savings (struct web *web, sbitmap spilled)
162{
163 unsigned HOST_WIDE_INT savings = 0;
164 struct move_list *ml;
165 struct move *m;
166 unsigned int cost;
167 if (web->pattern)
168 return 0;
169 cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x), web->regclass, 1);
170 cost += 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x), web->regclass, 0);
171 for (ml = wl_moves; ml; ml = ml->next)
172 if ((m = ml->move) != NULL)
173 {
174 struct web *s = alias (m->source_web);
175 struct web *t = alias (m->target_web);
176 if (s != web)
177 {
178 struct web *h = s;
179 s = t;
180 t = h;
181 }
182 if (s != web || !TEST_BIT (spilled, t->id) || t->pattern
183 || TEST_BIT (sup_igraph, s->id * num_webs + t->id)
184 || TEST_BIT (sup_igraph, t->id * num_webs + s->id))
185 continue;
186 savings += BLOCK_FOR_INSN (m->insn)->frequency * cost;
187 }
188 return savings;
189}
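/* Editorial illustration (made-up numbers, not part of the original source):
   with a hypothetical MEMORY_MOVE_COST of 4 in both directions, COST above
   is (1 + 4) + (1 + 4) = 10.  If WEB is move-connected to two spilled webs
   it doesn't conflict with, and those move insns sit in blocks with
   frequencies 3 and 7, the estimated savings are 3*10 + 7*10 = 100, which
   spill_propagation() then compares against WEB's spill_cost.  */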
190
191/* This adds to LIST and PROCESSED the IDs of all colored webs which are
192 connected to WEB by a move. */
193
194static void
195spill_prop_insert (struct web *web, sbitmap list, sbitmap processed)
196{
197 struct move_list *ml;
198 struct move *m;
199 for (ml = wl_moves; ml; ml = ml->next)
200 if ((m = ml->move) != NULL)
201 {
202 struct web *s = alias (m->source_web);
203 struct web *t = alias (m->target_web);
204 if (s != web)
205 {
206 struct web *h = s;
207 s = t;
208 t = h;
209 }
210 if (s != web || t->type != COLORED || TEST_BIT (processed, t->id))
211 continue;
212 SET_BIT (list, t->id);
213 SET_BIT (processed, t->id);
214 }
215}
216
217/* The spill propagation pass. If we have two spilled webs, the first
218 connected through a move to a colored one, and the second also connected
219 to that colored one, and this colored web is only used to connect both
220 spilled webs, it might be worthwhile to spill that colored one.
221 This is the case, if the cost of the removed copy insns (all three webs
222 could be placed into the same stack slot) is higher than the spill cost
223 of the web.
224 TO_PROP are the webs we try to propagate from (i.e. spilled ones),
225 SPILLED the set of all spilled webs so far and PROCESSED the set
226 of all webs processed so far, so we don't do work twice. */
227
228static int
229spill_propagation (sbitmap to_prop, sbitmap spilled, sbitmap processed)
230{
231 int id;
232 int again = 0;
233 sbitmap list = sbitmap_alloc (num_webs);
234 sbitmap_zero (list);
235
236 /* First insert colored move neighbors into the candidate list. */
237 EXECUTE_IF_SET_IN_SBITMAP (to_prop, 0, id,
238 {
239 spill_prop_insert (ID2WEB (id), list, processed);
240 });
241 sbitmap_zero (to_prop);
242
243 /* For all candidates, see if the savings are higher than the web's
244 spill cost. */
245 while ((id = sbitmap_first_set_bit (list)) >= 0)
246 {
247 struct web *web = ID2WEB (id);
248 RESET_BIT (list, id);
249 if (spill_prop_savings (web, spilled) >= web->spill_cost)
250 {
251 /* If so, we found a new spilled web. Insert its colored
252 move neighbors again, and mark that we need to repeat the
253 whole main loop of spill propagation/coalescing again. */
254 remove_web_from_list (web);
255 web->color = -1;
256 put_web (web, SPILLED);
257 SET_BIT (spilled, id);
258 SET_BIT (to_prop, id);
259 spill_prop_insert (web, list, processed);
260 again = 1;
261 }
262 }
263 sbitmap_free (list);
264 return again;
265}
266
267/* The main phase to improve spill costs. This repeatedly runs
268 spill coalescing and spill propagation, until nothing changes. */
269
270static void
271spill_coalprop (void)
272{
273 sbitmap spilled, processed, to_prop;
274 struct dlist *d;
275 int again;
276 spilled = sbitmap_alloc (num_webs);
277 processed = sbitmap_alloc (num_webs);
278 to_prop = sbitmap_alloc (num_webs);
279 sbitmap_zero (spilled);
280 for (d = WEBS(SPILLED); d; d = d->next)
281 SET_BIT (spilled, DLIST_WEB (d)->id);
282 sbitmap_copy (to_prop, spilled);
283 sbitmap_zero (processed);
284 do
285 {
286 spill_coalescing (to_prop, spilled);
287 /* XXX Currently (with optimistic coalescing) spill_propagation()
288 doesn't give better code, sometimes it gives worse (but not by much)
289 code. I believe this is because of slightly wrong cost
290 measurements. Anyway right now it isn't worth the time it takes,
291 so deactivate it for now. */
292 again = 0 && spill_propagation (to_prop, spilled, processed);
293 }
294 while (again);
295 sbitmap_free (to_prop);
296 sbitmap_free (processed);
297 sbitmap_free (spilled);
298}
299
300/* Allocate a spill slot for WEB. Currently we spill to pseudo
301 registers, to be able to also track webs for "stack slots", and
302 to possibly colorize them. These pseudos are sometimes handled
303 in a special way, where we know that they can also represent
304 MEM references. */
305
306static void
307allocate_spill_web (struct web *web)
308{
309 int regno = web->regno;
310 rtx slot;
311 if (web->stack_slot)
312 return;
313 slot = gen_reg_rtx (PSEUDO_REGNO_MODE (regno));
314 web->stack_slot = slot;
315}
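/* Editorial note (not in the original source): the "spill slot" here is a
   fresh pseudo REG, not a MEM.  Only later, in emit_colors(), do webs that
   end up with an_unusable_color get a real stack slot via
   assign_stack_local(); everything else remains register-allocatable.  */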
316
317/* This chooses a color for all SPILLED webs for interference region
318 spilling. The heuristic isn't good in any way. */
319
320static void
321choose_spill_colors (void)
322{
323 struct dlist *d;
324 unsigned HOST_WIDE_INT *costs = xmalloc (FIRST_PSEUDO_REGISTER * sizeof (costs[0]));
325 for (d = WEBS(SPILLED); d; d = d->next)
326 {
327 struct web *web = DLIST_WEB (d);
328 struct conflict_link *wl;
329 int bestc, c;
330 HARD_REG_SET avail;
331 memset (costs, 0, FIRST_PSEUDO_REGISTER * sizeof (costs[0]));
332 for (wl = web->conflict_list; wl; wl = wl->next)
333 {
334 struct web *pweb = wl->t;
335 if (pweb->type == COLORED || pweb->type == PRECOLORED)
336 costs[pweb->color] += pweb->spill_cost;
337 }
338
339 COPY_HARD_REG_SET (avail, web->usable_regs);
340 if (web->crosses_call)
341 {
342 /* Add an arbitrary constant cost to colors not usable by
343 call-crossing webs without saves/loads. */
344 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
345 if (TEST_HARD_REG_BIT (call_used_reg_set, c))
346 costs[c] += 1000;
347 }
348 bestc = -1;
349 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
350 if ((bestc < 0 || costs[bestc] > costs[c])
351 && TEST_HARD_REG_BIT (avail, c)
352 && HARD_REGNO_MODE_OK (c, PSEUDO_REGNO_MODE (web->regno)))
353 {
354 int i, size;
355 size = hard_regno_nregs[c][PSEUDO_REGNO_MODE (web->regno)];
356 for (i = 1; i < size
357 && TEST_HARD_REG_BIT (avail, c + i); i++);
358 if (i == size)
359 bestc = c;
360 }
361 web->color = bestc;
362 ra_debug_msg (DUMP_PROCESS, "choosing color %d for spilled web %d\n",
363 bestc, web->id);
364 }
365
366 free (costs);
367}
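/* Editorial illustration (made-up numbers): suppose a spilled SImode web may
   use hard regs 0..3, its conflicting COLORED neighbors have colors 1 and 2
   with spill costs 20 and 5, and the web crosses a call while regs 0..2 are
   call-used.  Then costs[] is roughly {1000, 1020, 1005, 0}, so the loop
   above picks bestc = 3, provided HARD_REGNO_MODE_OK holds and all
   hard_regno_nregs registers starting at 3 are in the web's usable_regs.  */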
368
369/* For statistics sake we count the number and cost of all new loads,
370 stores and emitted rematerializations. */
371static unsigned int emitted_spill_loads;
372static unsigned int emitted_spill_stores;
373static unsigned int emitted_remat;
374static unsigned HOST_WIDE_INT spill_load_cost;
375static unsigned HOST_WIDE_INT spill_store_cost;
376static unsigned HOST_WIDE_INT spill_remat_cost;
377
378/* In rewrite_program2() we detect if some def is useless, in the sense
379 that the pseudo set is not live anymore at that point. The REF_IDs
380 of such defs are noted here. */
381static bitmap useless_defs;
382
383/* This is the simple and fast version of rewriting the program to
384 include spill code. It spills at every insn containing spilled
385 defs or uses. Loads are added only if flag_ra_spill_every_use is
386 nonzero, otherwise only stores will be added. This doesn't
387 support rematerialization.
388 NEW_DEATHS is filled with uids for insns, which probably contain
389 deaths. */
390
391static void
392rewrite_program (bitmap new_deaths)
393{
394 unsigned int i;
395 struct dlist *d;
396 bitmap b = BITMAP_XMALLOC ();
397
398 /* We walk over all webs, over all uses/defs. For all webs, we need
399 to look at spilled webs, and webs coalesced to spilled ones, in case
400 their alias isn't broken up, or they got spill coalesced. */
401 for (i = 0; i < 2; i++)
402 for (d = (i == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
403 {
404 struct web *web = DLIST_WEB (d);
405 struct web *aweb = alias (web);
406 unsigned int j;
407 rtx slot;
408
409 /* Is trivially true for spilled webs, but not for coalesced ones. */
410 if (aweb->type != SPILLED)
411 continue;
412
413 /* First add loads before every use, if we have to. */
414 if (flag_ra_spill_every_use)
415 {
416 bitmap_clear (b);
417 allocate_spill_web (aweb);
418 slot = aweb->stack_slot;
419 for (j = 0; j < web->num_uses; j++)
420 {
421 rtx insns, target, source;
422 rtx insn = DF_REF_INSN (web->uses[j]);
423 rtx prev = PREV_INSN (insn);
424 basic_block bb = BLOCK_FOR_INSN (insn);
425 /* Happens when spill_coalescing() deletes move insns. */
426 if (!INSN_P (insn))
427 continue;
428
429 /* Check that we didn't already add a load for this web
430 and insn. Happens when an insn uses the same web
431 multiple times. */
432 if (bitmap_bit_p (b, INSN_UID (insn)))
433 continue;
434 bitmap_set_bit (b, INSN_UID (insn));
435 target = DF_REF_REG (web->uses[j]);
436 source = slot;
437 start_sequence ();
438 if (GET_CODE (target) == SUBREG)
439 source = simplify_gen_subreg (GET_MODE (target), source,
440 GET_MODE (source),
441 SUBREG_BYTE (target));
442 ra_emit_move_insn (target, source);
443 insns = get_insns ();
444 end_sequence ();
445 emit_insn_before (insns, insn);
446
447 if (BB_HEAD (bb) == insn)
448 BB_HEAD (bb) = NEXT_INSN (prev);
449 for (insn = PREV_INSN (insn); insn != prev;
450 insn = PREV_INSN (insn))
451 {
452 set_block_for_insn (insn, bb);
453 df_insn_modify (df, bb, insn);
454 }
455
456 emitted_spill_loads++;
457 spill_load_cost += bb->frequency + 1;
458 }
459 }
460
461 /* Now emit the stores after each def.
462 If any uses were loaded from stackslots (compared to
463 rematerialized or not reloaded due to IR spilling),
464 aweb->stack_slot will be set. If not, we don't need to emit
465 any stack stores. */
466 slot = aweb->stack_slot;
467 bitmap_clear (b);
468 if (slot)
469 for (j = 0; j < web->num_defs; j++)
470 {
471 rtx insns, source, dest;
472 rtx insn = DF_REF_INSN (web->defs[j]);
473 rtx following = NEXT_INSN (insn);
474 basic_block bb = BLOCK_FOR_INSN (insn);
475 /* Happens when spill_coalescing() deletes move insns. */
476 if (!INSN_P (insn))
477 continue;
478 if (bitmap_bit_p (b, INSN_UID (insn)))
479 continue;
480 bitmap_set_bit (b, INSN_UID (insn));
481 start_sequence ();
482 source = DF_REF_REG (web->defs[j]);
483 dest = slot;
484 if (GET_CODE (source) == SUBREG)
485 dest = simplify_gen_subreg (GET_MODE (source), dest,
486 GET_MODE (dest),
487 SUBREG_BYTE (source));
488 ra_emit_move_insn (dest, source);
489
490 insns = get_insns ();
491 end_sequence ();
492 if (insns)
493 {
494 emit_insn_after (insns, insn);
495 if (BB_END (bb) == insn)
496 BB_END (bb) = PREV_INSN (following);
497 for (insn = insns; insn != following; insn = NEXT_INSN (insn))
498 {
499 set_block_for_insn (insn, bb);
500 df_insn_modify (df, bb, insn);
501 }
502 }
503 else
504 df_insn_modify (df, bb, insn);
505 emitted_spill_stores++;
506 spill_store_cost += bb->frequency + 1;
507 /* XXX we should set new_deaths for all inserted stores
508 whose pseudo dies here.
509 Note that this isn't the case for _all_ stores. */
510 /* I.e. the next statement is wrong, and might cause some spilltemps
511 to be categorized as spilltemp2's (i.e. live over a death),
512 although they aren't. This might make them spill again,
513 which causes an endless loop in case this insn is in fact
514 _no_ death. */
515 bitmap_set_bit (new_deaths, INSN_UID (PREV_INSN (following)));
516 }
517 }
518
519 BITMAP_XFREE (b);
520}
521
522/* A simple list of rtx's. */
523struct rtx_list
524{
525 struct rtx_list *next;
526 rtx x;
527};
528
529/* Adds X to *LIST. */
530
531static void
532remember_slot (struct rtx_list **list, rtx x)
533{
534 struct rtx_list *l;
535 /* PRE: X is not already in LIST. */
536 l = ra_alloc (sizeof (*l));
537 l->next = *list;
538 l->x = x;
539 *list = l;
540}
541
542/* Given two rtx's S1 and S2, either being REGs or MEMs (or SUBREGs
543 thereof), return nonzero if they overlap. A REG and a MEM don't
544 overlap, and if they are MEMs they must have an easy address
545 (plus (basereg) (const_int x)), otherwise they overlap. */
546
547static int
548slots_overlap_p (rtx s1, rtx s2)
549{
550 rtx base1, base2;
551 HOST_WIDE_INT ofs1 = 0, ofs2 = 0;
552 int size1 = GET_MODE_SIZE (GET_MODE (s1));
553 int size2 = GET_MODE_SIZE (GET_MODE (s2));
554 if (GET_CODE (s1) == SUBREG)
555 ofs1 = SUBREG_BYTE (s1), s1 = SUBREG_REG (s1);
556 if (GET_CODE (s2) == SUBREG)
557 ofs2 = SUBREG_BYTE (s2), s2 = SUBREG_REG (s2);
558
559 if (s1 == s2)
560 return 1;
561
562 if (GET_CODE (s1) != GET_CODE (s2))
563 return 0;
564
565 if (REG_P (s1) && REG_P (s2))
566 {
567 if (REGNO (s1) != REGNO (s2))
568 return 0;
569 if (ofs1 >= ofs2 + size2 || ofs2 >= ofs1 + size1)
570 return 0;
571 return 1;
572 }
573 if (!MEM_P (s1) || GET_CODE (s2) != MEM)
574 abort ();
575 s1 = XEXP (s1, 0);
576 s2 = XEXP (s2, 0);
577 if (GET_CODE (s1) != PLUS || !REG_P (XEXP (s1, 0))
578 || GET_CODE (XEXP (s1, 1)) != CONST_INT)
579 return 1;
580 if (GET_CODE (s2) != PLUS || !REG_P (XEXP (s2, 0))
581 || GET_CODE (XEXP (s2, 1)) != CONST_INT)
582 return 1;
583 base1 = XEXP (s1, 0);
584 base2 = XEXP (s2, 0);
585 if (!rtx_equal_p (base1, base2))
586 return 1;
587 ofs1 += INTVAL (XEXP (s1, 1));
588 ofs2 += INTVAL (XEXP (s2, 1));
589 if (ofs1 >= ofs2 + size2 || ofs2 >= ofs1 + size1)
590 return 0;
591 return 1;
592}
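/* Editorial illustration (made-up example): for the two stack refs
   (mem:SI (plus (reg fp) (const_int 8))) and
   (mem:DI (plus (reg fp) (const_int 12)))
   we get ofs1 = 8, size1 = 4 and ofs2 = 12, size2 = 8.  The byte ranges
   [8,12) and [12,20) don't intersect (ofs2 >= ofs1 + size1), so the function
   returns 0; with const_int 10 instead of 12 the ranges [8,12) and [10,18)
   would intersect and it would return 1.  */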
593
594/* This deletes from *LIST all rtx's which overlap with X in the sense
595 of slots_overlap_p(). */
596
597static void
598delete_overlapping_slots (struct rtx_list **list, rtx x)
599{
600 while (*list)
601 {
602 if (slots_overlap_p ((*list)->x, x))
603 *list = (*list)->next;
604 else
605 list = &((*list)->next);
606 }
607}
608
609/* Returns nonzero if X is a member of LIST. */
610
611static int
612slot_member_p (struct rtx_list *list, rtx x)
613{
614 for (;list; list = list->next)
615 if (rtx_equal_p (list->x, x))
616 return 1;
617 return 0;
618}
619
620/* A more sophisticated (and slower) method of adding the stores, than
621 rewrite_program(). This goes backward through the insn stream, adding
622 stores as it goes, but only if it hasn't just added a store to the
623 same location. NEW_DEATHS is a bitmap filled with uids of insns
624 containing deaths. */
625
626static void
627insert_stores (bitmap new_deaths)
628{
629 rtx insn;
630 rtx last_slot = NULL_RTX;
631 struct rtx_list *slots = NULL;
632
633 /* We go simply backwards over basic block borders. */
634 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
635 {
636 int uid = INSN_UID (insn);
637
638 /* If we reach a basic block border, which has more than one
639 outgoing edge, we simply forget all already emitted stores. */
640 if (BARRIER_P (insn)
641 || JUMP_P (insn) || can_throw_internal (insn))
642 {
643 last_slot = NULL_RTX;
644 slots = NULL;
645 }
646 if (!INSN_P (insn))
647 continue;
648
649 /* If this insn was not just added in this pass. */
650 if (uid < insn_df_max_uid)
651 {
652 unsigned int n;
653 rtx following = NEXT_INSN (insn);
654 basic_block bb = BLOCK_FOR_INSN (insn);
655 struct ra_insn_info info;
656
657 info = insn_df[uid];
658 for (n = 0; n < info.num_defs; n++)
659 {
660 struct web *web = def2web[DF_REF_ID (info.defs[n])];
661 struct web *aweb = alias (find_web_for_subweb (web));
662 rtx slot, source;
663 if (aweb->type != SPILLED || !aweb->stack_slot)
664 continue;
665 slot = aweb->stack_slot;
666 source = DF_REF_REG (info.defs[n]);
667 /* adjust_address() might generate code. */
668 start_sequence ();
669 if (GET_CODE (source) == SUBREG)
670 slot = simplify_gen_subreg (GET_MODE (source), slot,
671 GET_MODE (slot),
672 SUBREG_BYTE (source));
673 /* If we have no info about emitted stores, or it didn't
674 contain the location we intend to use soon, then
675 add the store. */
676 if ((!last_slot || !rtx_equal_p (slot, last_slot))
677 && ! slot_member_p (slots, slot))
678 {
679 rtx insns, ni;
680 last_slot = slot;
681 remember_slot (&slots, slot);
682 ra_emit_move_insn (slot, source);
683 insns = get_insns ();
684 end_sequence ();
685 if (insns)
686 {
687 emit_insn_after (insns, insn);
688 if (BB_END (bb) == insn)
689 BB_END (bb) = PREV_INSN (following);
690 for (ni = insns; ni != following; ni = NEXT_INSN (ni))
691 {
692 set_block_for_insn (ni, bb);
693 df_insn_modify (df, bb, ni);
694 }
695 }
696 else
697 df_insn_modify (df, bb, insn);
698 emitted_spill_stores++;
699 spill_store_cost += bb->frequency + 1;
700 bitmap_set_bit (new_deaths, INSN_UID (PREV_INSN (following)));
701 }
702 else
703 {
704 /* Otherwise ignore insns from adjust_address() above. */
705 end_sequence ();
706 }
707 }
708 }
709 /* If we look at a load generated by the allocator, forget
710 the last emitted slot, and additionally clear all slots
711 overlapping its source (after all, we need it again). */
712 /* XXX If we emit the stack-ref directly into the using insn the
713 following needs a change, because that is no new insn. Preferably
714 we would add some notes to the insn, what stackslots are needed
715 for it. */
716 if (uid >= last_max_uid)
717 {
718 rtx set = single_set (insn);
719 last_slot = NULL_RTX;
720 /* If this was no simple set, give up, and forget everything. */
721 if (!set)
722 slots = NULL;
723 else
724 {
725 if (1 || MEM_P (SET_SRC (set)))
726 delete_overlapping_slots (&slots, SET_SRC (set));
727 }
728 }
729 }
730}
731
732/* Returns 1 if both colored webs have some hardregs in common, even if
733 they are not the same width. */
734
735static int
736spill_same_color_p (struct web *web1, struct web *web2)
737{
738 int c1, size1, c2, size2;
739 if ((c1 = alias (web1)->color) < 0 || c1 == an_unusable_color)
740 return 0;
741 if ((c2 = alias (web2)->color) < 0 || c2 == an_unusable_color)
742 return 0;
743
744 size1 = web1->type == PRECOLORED
745 ? 1 : hard_regno_nregs[c1][PSEUDO_REGNO_MODE (web1->regno)];
746 size2 = web2->type == PRECOLORED
747 ? 1 : hard_regno_nregs[c2][PSEUDO_REGNO_MODE (web2->regno)];
748 if (c1 >= c2 + size2 || c2 >= c1 + size1)
749 return 0;
750 return 1;
751}
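/* Editorial illustration (made-up colors): if web1 is colored c1 = 3 and
   needs size1 = 2 hard regs (regs 3 and 4), while web2 is colored c2 = 4
   with size2 = 1 (reg 4), then neither c1 >= c2 + size2 nor
   c2 >= c1 + size1 holds, so the register ranges overlap and we return 1.  */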
752
753/* Given the set of live web IDs LIVE, returns nonzero if any of WEB's
754 subwebs (or WEB itself) is live. */
755
756static bool
757is_partly_live_1 (sbitmap live, struct web *web)
758{
759 do
760 if (TEST_BIT (live, web->id))
761 return 1;
762 while ((web = web->subreg_next));
763 return 0;
764}
765
766/* Fast version in case WEB has no subwebs. */
767#define is_partly_live(live, web) ((!web->subreg_next) \
768 ? TEST_BIT (live, web->id) \
769 : is_partly_live_1 (live, web))
770
771/* Change the set of currently IN_USE colors according to
772 WEB's color. Either add those colors to the hardreg set (if ADD
773 is nonzero), or remove them. */
774
775static void
776update_spill_colors (HARD_REG_SET *in_use, struct web *web, int add)
777{
778 int c, size;
779 if ((c = alias (find_web_for_subweb (web))->color) < 0
780 || c == an_unusable_color)
781 return;
782 size = hard_regno_nregs[c][GET_MODE (web->orig_x)];
783 if (SUBWEB_P (web))
784 {
785 c += subreg_regno_offset (c, GET_MODE (SUBREG_REG (web->orig_x)),
786 SUBREG_BYTE (web->orig_x),
787 GET_MODE (web->orig_x));
788 }
789 else if (web->type == PRECOLORED)
790 size = 1;
791 if (add)
792 for (; size--;)
793 SET_HARD_REG_BIT (*in_use, c + size);
794 else
795 for (; size--;)
796 CLEAR_HARD_REG_BIT (*in_use, c + size);
797}
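/* Editorial illustration (made-up numbers): a DImode web colored 6 on a
   target where hard_regno_nregs[6][DImode] == 2 sets (or clears) bits 6 and 7
   of *IN_USE; for a subweb, subreg_regno_offset() additionally shifts the
   starting register by the subreg's byte offset.  */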
798
799/* Given a set of hardregs currently IN_USE and the color C of WEB,
800 return -1 if WEB has no color, 1 if it has the unusable color,
801 0 if one of its used hardregs is in use, and 1 otherwise.
802 Generally, if WEB can't be left colorized return 1. */
803
804static int
805spill_is_free (HARD_REG_SET *in_use, struct web *web)
806{
807 int c, size;
808 if ((c = alias (web)->color) < 0)
809 return -1;
810 if (c == an_unusable_color)
811 return 1;
812 size = web->type == PRECOLORED
813 ? 1 : hard_regno_nregs[c][PSEUDO_REGNO_MODE (web->regno)];
814 for (; size--;)
815 if (TEST_HARD_REG_BIT (*in_use, c + size))
816 return 0;
817 return 1;
818}
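/* Editorial illustration: a web colored 2 that needs 2 hard regs is "free"
   (return 1) only if neither bit 2 nor bit 3 is set in *IN_USE; if, say,
   bit 3 is set, the function returns 0 and callers such as
   rewrite_program2() will then schedule a load for that web.  */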
819
820
821/* Structure for passing between rewrite_program2() and emit_loads(). */
822struct rewrite_info
823{
824 /* The web IDs which currently would need a reload. These are
825 currently live spilled webs, whose color was still free. */
826 bitmap need_reload;
827 /* We need a scratch bitmap, but don't want to allocate one a zillion
828 times. */
829 bitmap scratch;
830 /* Web IDs of currently live webs. These are the precise IDs,
831 not just those of the superwebs. If only one part is live,
832 only that ID is placed here. */
833 sbitmap live;
834 /* An array of webs, which currently need a load added.
835 They will be emitted when seeing the first death. */
836 struct web **needed_loads;
837 /* The current number of entries in needed_loads. */
838 int nl_size;
839 /* The number of bits set in need_reload. */
840 int num_reloads;
841 /* The current set of hardregs not available. */
842 HARD_REG_SET colors_in_use;
843 /* Nonzero, if we just added some spill temps to need_reload or
844 needed_loads. In this case we don't wait for the next death
845 to emit their loads. */
846 int any_spilltemps_spilled;
847 /* Nonzero, if we currently need to emit the loads. E.g. when we
848 saw an insn containing deaths. */
849 int need_load;
850};
851
852/* The needed_loads list of RI contains some webs for which
853 we add the actual load insns here. They are added just before
854 their use last seen. NL_FIRST_RELOAD is the index of the first
855 load which is a converted reload, all other entries are normal
856 loads. LAST_BLOCK_INSN is the last insn of the current basic block. */
857
858static void
859emit_loads (struct rewrite_info *ri, int nl_first_reload, rtx last_block_insn)
860{
861 int j;
862 for (j = ri->nl_size; j;)
863 {
864 struct web *web = ri->needed_loads[--j];
865 struct web *supweb;
866 struct web *aweb;
867 rtx ni, slot, reg;
868 rtx before = NULL_RTX, after = NULL_RTX;
869 basic_block bb;
870 /* When spilltemps were spilled for the last insns, their
871 loads already are emitted, which is noted by setting
872 needed_loads[] for it to 0. */
873 if (!web)
874 continue;
875 supweb = find_web_for_subweb (web);
876 if (supweb->regno >= max_normal_pseudo)
877 abort ();
878 /* Check for web being a spilltemp, if we only want to
879 load spilltemps. Also remember, that we emitted that
880 load, which we don't need to do when we have a death,
881 because then all of needed_loads[] is emptied. */
882 if (!ri->need_load)
883 {
884 if (!supweb->spill_temp)
885 continue;
886 else
887 ri->needed_loads[j] = 0;
888 }
889 web->in_load = 0;
890 /* The adding of reloads doesn't depend on liveness. */
891 if (j < nl_first_reload && !TEST_BIT (ri->live, web->id))
892 continue;
893 aweb = alias (supweb);
894 aweb->changed = 1;
895 start_sequence ();
896 if (supweb->pattern)
897 {
898 /* XXX If we later allow non-constant sources for rematerialization
899 we must also disallow coalescing _to_ rematerialized webs
900 (at least then disallow spilling them, which we already ensure
901 when flag_ra_break_aliases), or not take the pattern but a
902 stackslot. */
903 if (aweb != supweb)
904 abort ();
905 slot = copy_rtx (supweb->pattern);
906 reg = copy_rtx (supweb->orig_x);
907 /* Sanity check. orig_x should be a REG rtx, which should be
908 shared over all RTL, so copy_rtx should have no effect. */
909 if (reg != supweb->orig_x)
910 abort ();
911 }
912 else
913 {
914 allocate_spill_web (aweb);
915 slot = aweb->stack_slot;
916
917 /* If we don't copy the RTL there might be some SUBREG
918 rtx shared in the next iteration although being in
919 different webs, which leads to wrong code. */
920 reg = copy_rtx (web->orig_x);
921 if (GET_CODE (reg) == SUBREG)
922 /*slot = adjust_address (slot, GET_MODE (reg), SUBREG_BYTE
923 (reg));*/
924 slot = simplify_gen_subreg (GET_MODE (reg), slot, GET_MODE (slot),
925 SUBREG_BYTE (reg));
926 }
927 ra_emit_move_insn (reg, slot);
928 ni = get_insns ();
929 end_sequence ();
930 before = web->last_use_insn;
931 web->last_use_insn = NULL_RTX;
932 if (!before)
933 {
934 if (JUMP_P (last_block_insn))
935 before = last_block_insn;
936 else
937 after = last_block_insn;
938 }
939 if (after)
940 {
941 rtx foll = NEXT_INSN (after);
942 bb = BLOCK_FOR_INSN (after);
943 emit_insn_after (ni, after);
944 if (BB_END (bb) == after)
945 BB_END (bb) = PREV_INSN (foll);
946 for (ni = NEXT_INSN (after); ni != foll; ni = NEXT_INSN (ni))
947 {
948 set_block_for_insn (ni, bb);
949 df_insn_modify (df, bb, ni);
950 }
951 }
952 else
953 {
954 rtx prev = PREV_INSN (before);
955 bb = BLOCK_FOR_INSN (before);
956 emit_insn_before (ni, before);
957 if (BB_HEAD (bb) == before)
958 BB_HEAD (bb) = NEXT_INSN (prev);
959 for (; ni != before; ni = NEXT_INSN (ni))
960 {
961 set_block_for_insn (ni, bb);
962 df_insn_modify (df, bb, ni);
963 }
964 }
965 if (supweb->pattern)
966 {
967 emitted_remat++;
968 spill_remat_cost += bb->frequency + 1;
969 }
970 else
971 {
972 emitted_spill_loads++;
973 spill_load_cost += bb->frequency + 1;
974 }
975 RESET_BIT (ri->live, web->id);
976 /* In the special case documented above only emit the reloads and
977 one load. */
978 if (ri->need_load == 2 && j < nl_first_reload)
979 break;
980 }
981 if (ri->need_load)
982 ri->nl_size = j;
983}
984
985/* Given a set of reloads in RI, an array of NUM_REFS references (either
986 uses or defs) in REFS, and REF2WEB to translate ref IDs to webs
987 (either use2web or def2web) convert some reloads to loads.
988 This looks at the webs referenced, and how they change the set of
989 available colors. Now put all still live webs, which needed reloads,
990 and whose colors aren't free anymore, on the needed_loads list. */
991
992static void
993reloads_to_loads (struct rewrite_info *ri, struct ref **refs,
994 unsigned int num_refs, struct web **ref2web)
995{
996 unsigned int n;
997 int num_reloads = ri->num_reloads;
998 for (n = 0; n < num_refs && num_reloads; n++)
999 {
1000 struct web *web = ref2web[DF_REF_ID (refs[n])];
1001 struct web *supweb = find_web_for_subweb (web);
1002 int is_death;
1003 int j;
1004 /* Only emit reloads when entering their interference
1005 region. A use of a spilled web never opens an
1006 interference region, independent of it's color. */
1007 if (alias (supweb)->type == SPILLED)
1008 continue;
1009 if (supweb->type == PRECOLORED
1010 && TEST_HARD_REG_BIT (never_use_colors, supweb->color))
1011 continue;
1012 /* Note, that if web (and supweb) are DEFs, we already cleared
1013 the corresponding bits in live. I.e. is_death becomes true, which
1014 is what we want. */
1015 is_death = !TEST_BIT (ri->live, supweb->id);
1016 is_death &= !TEST_BIT (ri->live, web->id);
1017 if (is_death)
1018 {
1019 int old_num_r = num_reloads;
1020 bitmap_clear (ri->scratch);
1021 EXECUTE_IF_SET_IN_BITMAP (ri->need_reload, 0, j,
1022 {
1023 struct web *web2 = ID2WEB (j);
1024 struct web *aweb2 = alias (find_web_for_subweb (web2));
1025 if (spill_is_free (&(ri->colors_in_use), aweb2) == 0)
1026 abort ();
1027 if (spill_same_color_p (supweb, aweb2)
1028 /* && interfere (web, web2) */)
1029 {
1030 if (!web2->in_load)
1031 {
1032 ri->needed_loads[ri->nl_size++] = web2;
1033 web2->in_load = 1;
1034 }
1035 bitmap_set_bit (ri->scratch, j);
1036 num_reloads--;
1037 }
1038 });
1039 if (num_reloads != old_num_r)
1040 bitmap_operation (ri->need_reload, ri->need_reload, ri->scratch,
1041 BITMAP_AND_COMPL);
1042 }
1043 }
1044 ri->num_reloads = num_reloads;
1045}
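/* Editorial walk-through (not part of the original source): suppose web W
   was put on ri->need_reload because its color was still free, and the
   current insn contains the death of another, non-spilled web D whose hard
   reg range overlaps W's.  The loop above sees the death of D, finds W via
   spill_same_color_p(), moves W from need_reload to needed_loads, and
   emit_loads() will then materialize the load just before W's last recorded
   use.  */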
1046
1047/* This adds loads for spilled webs to the program. It uses a kind of
1048 interference region spilling. If flag_ra_ir_spilling is zero it
1049 only uses improved chaitin spilling (adding loads only at insns
1050 containing deaths). */
1051
1052static void
1053rewrite_program2 (bitmap new_deaths)
1054{
1055 basic_block bb = NULL;
1056 int nl_first_reload;
1057 struct rewrite_info ri;
1058 rtx insn;
1059 ri.needed_loads = xmalloc (num_webs * sizeof (struct web *));
1060 ri.need_reload = BITMAP_XMALLOC ();
1061 ri.scratch = BITMAP_XMALLOC ();
1062 ri.live = sbitmap_alloc (num_webs);
1063 ri.nl_size = 0;
1064 ri.num_reloads = 0;
1065 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
1066 {
1067 basic_block last_bb = NULL;
1068 rtx last_block_insn;
1069 int i, j;
1070 if (!INSN_P (insn))
1071 insn = prev_real_insn (insn);
1072 while (insn && !(bb = BLOCK_FOR_INSN (insn)))
1073 insn = prev_real_insn (insn);
1074 if (!insn)
1075 break;
1076 i = bb->index + 2;
1077 last_block_insn = insn;
1078
1079 sbitmap_zero (ri.live);
1080 CLEAR_HARD_REG_SET (ri.colors_in_use);
1081 EXECUTE_IF_SET_IN_BITMAP (live_at_end[i - 2], 0, j,
1082 {
1083 struct web *web = use2web[j];
1084 struct web *aweb = alias (find_web_for_subweb (web));
1085 /* A web is only live at end if it isn't spilled. If we didn't
1086 check this, the last uses of spilled webs per basic block
1087 wouldn't be detected as deaths, although they are in the final
1088 code. This would lead to accumulating many loads without need,
1089 only increasing register pressure. */
1090 /* XXX do add also spilled webs which got a color for IR spilling.
1091 Remember to not add to colors_in_use in that case. */
1092 if (aweb->type != SPILLED /*|| aweb->color >= 0*/)
1093 {
1094 SET_BIT (ri.live, web->id);
1095 if (aweb->type != SPILLED)
1096 update_spill_colors (&(ri.colors_in_use), web, 1);
1097 }
1098 });
1099
1100 bitmap_clear (ri.need_reload);
1101 ri.num_reloads = 0;
1102 ri.any_spilltemps_spilled = 0;
1103 if (flag_ra_ir_spilling)
1104 {
1105 struct dlist *d;
1106 int pass;
1107 /* XXX If we don't add spilled nodes into live above, the following
1108 becomes an empty loop. */
1109 for (pass = 0; pass < 2; pass++)
1110 for (d = (pass) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
1111 {
1112 struct web *web = DLIST_WEB (d);
1113 struct web *aweb = alias (web);
1114 if (aweb->type != SPILLED)
1115 continue;
1116 if (is_partly_live (ri.live, web)
1117 && spill_is_free (&(ri.colors_in_use), web) > 0)
1118 {
1119 ri.num_reloads++;
1120 bitmap_set_bit (ri.need_reload, web->id);
1121 /* Last using insn is somewhere in another block. */
1122 web->last_use_insn = NULL_RTX;
1123 }
1124 }
1125 }
1126
1127 last_bb = bb;
1128 for (; insn; insn = PREV_INSN (insn))
1129 {
1130 struct ra_insn_info info;
1131 unsigned int n;
1132
1133 memset (&info, 0, sizeof info);
1134
1135 if (INSN_P (insn) && BLOCK_FOR_INSN (insn) != last_bb)
1136 {
1137 int index = BLOCK_FOR_INSN (insn)->index + 2;
1138 EXECUTE_IF_SET_IN_BITMAP (live_at_end[index - 2], 0, j,
1139 {
1140 struct web *web = use2web[j];
1141 struct web *aweb = alias (find_web_for_subweb (web));
1142 if (aweb->type != SPILLED)
1143 {
1144 SET_BIT (ri.live, web->id);
1145 update_spill_colors (&(ri.colors_in_use), web, 1);
1146 }
1147 });
1148 bitmap_clear (ri.scratch);
1149 EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j,
1150 {
1151 struct web *web2 = ID2WEB (j);
1152 struct web *supweb2 = find_web_for_subweb (web2);
1153 struct web *aweb2 = alias (supweb2);
1154 if (spill_is_free (&(ri.colors_in_use), aweb2) <= 0)
1155 {
1156 if (!web2->in_load)
1157 {
1158 ri.needed_loads[ri.nl_size++] = web2;
1159 web2->in_load = 1;
1160 }
1161 bitmap_set_bit (ri.scratch, j);
1162 ri.num_reloads--;
1163 }
1164 });
1165 bitmap_operation (ri.need_reload, ri.need_reload, ri.scratch,
1166 BITMAP_AND_COMPL);
1167 last_bb = BLOCK_FOR_INSN (insn);
1168 last_block_insn = insn;
1169 if (!INSN_P (last_block_insn))
1170 last_block_insn = prev_real_insn (last_block_insn);
1171 }
1172
1173 ri.need_load = 0;
1174 if (INSN_P (insn))
1175 info = insn_df[INSN_UID (insn)];
1176
1177 if (INSN_P (insn))
1178 for (n = 0; n < info.num_defs; n++)
1179 {
1180 struct ref *ref = info.defs[n];
1181 struct web *web = def2web[DF_REF_ID (ref)];
1182 struct web *supweb = find_web_for_subweb (web);
1183 int is_non_def = 0;
1184 unsigned int n2;
1185
1186 supweb = find_web_for_subweb (web);
1187 /* Webs which are defined here, but also used in the same insn
1188 are rmw webs, or this use isn't a death because of looping
1189 constructs. In neither case does this def make its
1190 resources available. Reloads for it are still needed, it's still
1191 live and its colors don't become free. */
1192 for (n2 = 0; n2 < info.num_uses; n2++)
1193 {
1194 struct web *web2 = use2web[DF_REF_ID (info.uses[n2])];
1195 if (supweb == find_web_for_subweb (web2))
1196 {
1197 is_non_def = 1;
1198 break;
1199 }
1200 }
1201 if (is_non_def)
1202 continue;
1203
1204 if (!is_partly_live (ri.live, supweb))
1205 bitmap_set_bit (useless_defs, DF_REF_ID (ref));
1206
1207 RESET_BIT (ri.live, web->id);
1208 if (bitmap_bit_p (ri.need_reload, web->id))
1209 {
1210 ri.num_reloads--;
1211 bitmap_clear_bit (ri.need_reload, web->id);
1212 }
1213 if (web != supweb)
1214 {
1215 /* XXX subwebs aren't precisely tracked here. We have
1216 everything we need (inverse webs), but the code isn't
1217 yet written. We need to make all completely
1218 overlapping web parts non-live here. */
1219 /* If by luck now the whole web isn't live anymore, no
1220 reloads for it are needed. */
1221 if (!is_partly_live (ri.live, supweb)
1222 && bitmap_bit_p (ri.need_reload, supweb->id))
1223 {
1224 ri.num_reloads--;
1225 bitmap_clear_bit (ri.need_reload, supweb->id);
1226 }
1227 }
1228 else
1229 {
1230 struct web *sweb;
1231 /* If the whole web is defined here, no parts of it are
1232 live anymore and no reloads are needed for them. */
1233 for (sweb = supweb->subreg_next; sweb;
1234 sweb = sweb->subreg_next)
1235 {
1236 RESET_BIT (ri.live, sweb->id);
1237 if (bitmap_bit_p (ri.need_reload, sweb->id))
1238 {
1239 ri.num_reloads--;
1240 bitmap_clear_bit (ri.need_reload, sweb->id);
1241 }
1242 }
1243 }
1244 if (alias (supweb)->type != SPILLED)
1245 update_spill_colors (&(ri.colors_in_use), web, 0);
1246 }
1247
1248 nl_first_reload = ri.nl_size;
1249
1250 /* CALL_INSNs are not really deaths, but still more registers
1251 are free after a call than before.
1252 XXX Note, that sometimes reload barfs when we emit insns between
1253 a call and the insn which copies the return register into a
1254 pseudo. */
1255 if (CALL_P (insn))
1256 ri.need_load = 1;
1257 else if (INSN_P (insn))
1258 for (n = 0; n < info.num_uses; n++)
1259 {
1260 struct web *web = use2web[DF_REF_ID (info.uses[n])];
1261 struct web *supweb = find_web_for_subweb (web);
1262 int is_death;
1263 if (supweb->type == PRECOLORED
1264 && TEST_HARD_REG_BIT (never_use_colors, supweb->color))
1265 continue;
1266 is_death = !TEST_BIT (ri.live, supweb->id);
1267 is_death &= !TEST_BIT (ri.live, web->id);
1268 if (is_death)
1269 {
1270 ri.need_load = 1;
1271 bitmap_set_bit (new_deaths, INSN_UID (insn));
1272 break;
1273 }
1274 }
1275
1276 if (INSN_P (insn) && ri.num_reloads)
1277 {
1278 int old_num_reloads = ri.num_reloads;
1279 reloads_to_loads (&ri, info.uses, info.num_uses, use2web);
1280
1281 /* If this insn sets a pseudo, which isn't used later
1282 (i.e. wasn't live before) it is a dead store. We need
1283 to emit all reloads which have the same color as this def.
1284 We don't need to check for non-liveness here to detect
1285 the deadness (it anyway is too late, as we already cleared
1286 the liveness in the first loop over the defs), because if it
1287 _would_ be live here, no reload could have that color, as
1288 they would already have been converted to a load. */
1289 if (ri.num_reloads)
1290 reloads_to_loads (&ri, info.defs, info.num_defs, def2web);
1291 if (ri.num_reloads != old_num_reloads && !ri.need_load)
1292 ri.need_load = 1;
1293 }
1294
1295 if (ri.nl_size && (ri.need_load || ri.any_spilltemps_spilled))
1296 emit_loads (&ri, nl_first_reload, last_block_insn);
1297
1298 if (INSN_P (insn) && flag_ra_ir_spilling)
1299 for (n = 0; n < info.num_uses; n++)
1300 {
1301 struct web *web = use2web[DF_REF_ID (info.uses[n])];
1302 struct web *aweb = alias (find_web_for_subweb (web));
1303 if (aweb->type != SPILLED)
1304 update_spill_colors (&(ri.colors_in_use), web, 1);
1305 }
1306
1307 ri.any_spilltemps_spilled = 0;
1308 if (INSN_P (insn))
1309 for (n = 0; n < info.num_uses; n++)
1310 {
1311 struct web *web = use2web[DF_REF_ID (info.uses[n])];
1312 struct web *supweb = find_web_for_subweb (web);
1313 struct web *aweb = alias (supweb);
1314 SET_BIT (ri.live, web->id);
1315 if (aweb->type != SPILLED)
1316 continue;
1317 if (supweb->spill_temp)
1318 ri.any_spilltemps_spilled = 1;
1319 web->last_use_insn = insn;
1320 if (!web->in_load)
1321 {
1322 if (spill_is_free (&(ri.colors_in_use), aweb) <= 0
1323 || !flag_ra_ir_spilling)
1324 {
1325 ri.needed_loads[ri.nl_size++] = web;
1326 web->in_load = 1;
1327 web->one_load = 1;
1328 }
1329 else if (!bitmap_bit_p (ri.need_reload, web->id))
1330 {
1331 bitmap_set_bit (ri.need_reload, web->id);
1332 ri.num_reloads++;
1333 web->one_load = 1;
1334 }
1335 else
1336 web->one_load = 0;
1337 }
1338 else
1339 web->one_load = 0;
1340 }
1341
1342 if (LABEL_P (insn))
1343 break;
1344 }
1345
1346 nl_first_reload = ri.nl_size;
1347 if (ri.num_reloads)
1348 {
1349 int in_ir = 0;
1350 edge e;
1351 int num = 0;
1352 HARD_REG_SET cum_colors, colors;
1353 CLEAR_HARD_REG_SET (cum_colors);
1354 for (e = bb->pred; e && num < 5; e = e->pred_next, num++)
1355 {
1356 int j;
1357 CLEAR_HARD_REG_SET (colors);
1358 EXECUTE_IF_SET_IN_BITMAP (live_at_end[e->src->index], 0, j,
1359 {
1360 struct web *web = use2web[j];
1361 struct web *aweb = alias (find_web_for_subweb (web));
1362 if (aweb->type != SPILLED)
1363 update_spill_colors (&colors, web, 1);
1364 });
1365 IOR_HARD_REG_SET (cum_colors, colors);
1366 }
1367 if (num == 5)
1368 in_ir = 1;
1369
1370 bitmap_clear (ri.scratch);
1371 EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j,
1372 {
1373 struct web *web2 = ID2WEB (j);
1374 struct web *supweb2 = find_web_for_subweb (web2);
1375 struct web *aweb2 = alias (supweb2);
1376 /* Is the block entry an IR boundary for aweb2?
1377 Currently these are more or less just some tries at good conditions. */
1378 if (((ra_pass > 0 || supweb2->target_of_spilled_move)
1379 && (1 || in_ir || spill_is_free (&cum_colors, aweb2) <= 0))
1380 || (ra_pass == 1
1381 && (in_ir
1382 || spill_is_free (&cum_colors, aweb2) <= 0)))
1383 {
1384 if (!web2->in_load)
1385 {
1386 ri.needed_loads[ri.nl_size++] = web2;
1387 web2->in_load = 1;
1388 }
1389 bitmap_set_bit (ri.scratch, j);
1390 ri.num_reloads--;
1391 }
1392 });
1393 bitmap_operation (ri.need_reload, ri.need_reload, ri.scratch,
1394 BITMAP_AND_COMPL);
1395 }
1396
1397 ri.need_load = 1;
1398 emit_loads (&ri, nl_first_reload, last_block_insn);
1399 if (ri.nl_size != 0 /*|| ri.num_reloads != 0*/)
1400 abort ();
1401 if (!insn)
1402 break;
1403 }
1404 free (ri.needed_loads);
1405 sbitmap_free (ri.live);
1406 BITMAP_XFREE (ri.scratch);
1407 BITMAP_XFREE (ri.need_reload);
1408}
1409
1410/* WEB is a web conflicting with a spilled one. Prepare it
1411 to be able to rescan it in the next pass. Mark all its uses
1412 for checking, and clear some members of their web parts
1413 (of defs and uses). Notably don't clear the uplink. We don't
1414 change the layout of this web, just its conflicts.
1415 Also remember all IDs of its uses in USES_AS_BITMAP. */
1416
1417static void
1418mark_refs_for_checking (struct web *web, bitmap uses_as_bitmap)
1419{
1420 unsigned int i;
1421 for (i = 0; i < web->num_uses; i++)
1422 {
1423 unsigned int id = DF_REF_ID (web->uses[i]);
1424 SET_BIT (last_check_uses, id);
1425 bitmap_set_bit (uses_as_bitmap, id);
1426 web_parts[df->def_id + id].spanned_deaths = 0;
1427 web_parts[df->def_id + id].crosses_call = 0;
1428 }
1429 for (i = 0; i < web->num_defs; i++)
1430 {
1431 unsigned int id = DF_REF_ID (web->defs[i]);
1432 web_parts[id].spanned_deaths = 0;
1433 web_parts[id].crosses_call = 0;
1434 }
1435}
1436
1437/* The last step of the spill phase is to set up the structures for
1438 incrementally rebuilding the interference graph. We break up
1439 the web part structure of all spilled webs, mark their uses for
1440 rechecking, look at their neighbors, and clean up some global
1441 information, which we will rebuild. */
1442
1443static void
1444detect_web_parts_to_rebuild (void)
1445{
1446 bitmap uses_as_bitmap;
1447 unsigned int i, pass;
1448 struct dlist *d;
1449 sbitmap already_webs = sbitmap_alloc (num_webs);
1450
1451 uses_as_bitmap = BITMAP_XMALLOC ();
1452 if (last_check_uses)
1453 sbitmap_free (last_check_uses);
1454 last_check_uses = sbitmap_alloc (df->use_id);
1455 sbitmap_zero (last_check_uses);
1456 sbitmap_zero (already_webs);
1457 /* We need to recheck all uses of all webs involved in spilling (and the
1458 uses added by spill insns, but those are not analyzed yet).
1459 Those are the spilled webs themselves, webs coalesced to spilled ones,
1460 and webs conflicting with any of them. */
1461 for (pass = 0; pass < 2; pass++)
1462 for (d = (pass == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
1463 {
1464 struct web *web = DLIST_WEB (d);
1465 struct conflict_link *wl;
1466 unsigned int j;
1467 /* This check is only needed for coalesced nodes, but hey. */
1468 if (alias (web)->type != SPILLED)
1469 continue;
1470
1471 /* For the spilled web itself we also need to clear its
1472 uplink, to be able to rebuild smaller webs. After all,
1473 spilling has split the web. */
1474 for (i = 0; i < web->num_uses; i++)
1475 {
1476 unsigned int id = DF_REF_ID (web->uses[i]);
1477 SET_BIT (last_check_uses, id);
1478 bitmap_set_bit (uses_as_bitmap, id);
1479 web_parts[df->def_id + id].uplink = NULL;
1480 web_parts[df->def_id + id].spanned_deaths = 0;
1481 web_parts[df->def_id + id].crosses_call = 0;
1482 }
1483 for (i = 0; i < web->num_defs; i++)
1484 {
1485 unsigned int id = DF_REF_ID (web->defs[i]);
1486 web_parts[id].uplink = NULL;
1487 web_parts[id].spanned_deaths = 0;
1488 web_parts[id].crosses_call = 0;
1489 }
1490
1491 /* Now look at all neighbors of this spilled web. */
1492 if (web->have_orig_conflicts)
1493 wl = web->orig_conflict_list;
1494 else
1495 wl = web->conflict_list;
1496 for (; wl; wl = wl->next)
1497 {
1498 if (TEST_BIT (already_webs, wl->t->id))
1499 continue;
1500 SET_BIT (already_webs, wl->t->id);
1501 mark_refs_for_checking (wl->t, uses_as_bitmap);
1502 }
1503 EXECUTE_IF_SET_IN_BITMAP (web->useless_conflicts, 0, j,
1504 {
1505 struct web *web2 = ID2WEB (j);
1506 if (TEST_BIT (already_webs, web2->id))
1507 continue;
1508 SET_BIT (already_webs, web2->id);
1509 mark_refs_for_checking (web2, uses_as_bitmap);
1510 });
1511 }
1512
1513 /* We also recheck unconditionally all uses of any hardregs. This means
1514 we _can_ delete all these uses from the live_at_end[] bitmaps.
1515 And because we sometimes delete insns referring to hardregs (when
1516 they became useless because they setup a rematerializable pseudo, which
1517 then was rematerialized), some of those uses will go away with the next
1518 df_analyze(). This means we even _must_ delete those uses from
1519 the live_at_end[] bitmaps. For simplicity we simply delete
1520 all of them. */
1521 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1522 if (!fixed_regs[i])
1523 {
1524 struct df_link *link;
1525 for (link = df->regs[i].uses; link; link = link->next)
1526 if (link->ref)
1527 bitmap_set_bit (uses_as_bitmap, DF_REF_ID (link->ref));
1528 }
1529
1530 /* The information in live_at_end[] will be rebuilt for all uses
1531 we recheck, so clear it here (the uses of spilled webs might
1532 indeed not become a member of it again). */
1533 live_at_end -= 2;
1534 for (i = 0; i < (unsigned int) last_basic_block + 2; i++)
1535 bitmap_operation (live_at_end[i], live_at_end[i], uses_as_bitmap,
1536 BITMAP_AND_COMPL);
1537 live_at_end += 2;
1538
1539 if (dump_file && (debug_new_regalloc & DUMP_REBUILD) != 0)
1540 {
1541 ra_debug_msg (DUMP_REBUILD, "need to check these uses:\n");
1542 dump_sbitmap_file (dump_file, last_check_uses);
1543 }
1544 sbitmap_free (already_webs);
1545 BITMAP_XFREE (uses_as_bitmap);
1546}
1547
1548/* Statistics about deleted insns, which are useless now. */
1549static unsigned int deleted_def_insns;
1550static unsigned HOST_WIDE_INT deleted_def_cost;
1551
1552/* In rewrite_program2() we noticed when a certain insn set a pseudo
1553 which wasn't live. Try to delete all those insns. */
1554
1555static void
1556delete_useless_defs (void)
1557{
1558 unsigned int i;
1559 /* If the insn only sets the def without any sideeffect (besides
1560 clobbers or uses), we can delete it. single_set() also tests
1561 for INSN_P(insn). */
1562 EXECUTE_IF_SET_IN_BITMAP (useless_defs, 0, i,
1563 {
1564 rtx insn = DF_REF_INSN (df->defs[i]);
1565 rtx set = single_set (insn);
1566 struct web *web = find_web_for_subweb (def2web[i]);
1567 if (set && web->type == SPILLED && web->stack_slot == NULL)
1568 {
1569 deleted_def_insns++;
1570 deleted_def_cost += BLOCK_FOR_INSN (insn)->frequency + 1;
1571 PUT_CODE (insn, NOTE);
1572 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
1573 df_insn_modify (df, BLOCK_FOR_INSN (insn), insn);
1574 }
1575 });
1576}
1577
1578/* Look for spilled webs, on whose behalf no insns were emitted.
1579 We invert the changed flag of the webs, so after this function
1580 a nonzero changed flag means that this web was not spillable (at least
1581 in this pass). */
1582
1583static void
1584detect_non_changed_webs (void)
1585{
1586 struct dlist *d, *d_next;
1587 for (d = WEBS(SPILLED); d; d = d_next)
1588 {
1589 struct web *web = DLIST_WEB (d);
1590 d_next = d->next;
1591 if (!web->changed)
1592 {
1593 ra_debug_msg (DUMP_PROCESS, "no insns emitted for spilled web %d\n",
1594 web->id);
1595 remove_web_from_list (web);
1596 put_web (web, COLORED);
1597 web->changed = 1;
1598 }
1599 else
1600 web->changed = 0;
1601 /* From now on web->changed is used as the opposite flag.
1602 I.e. colored webs, which have changed set were formerly
1603 spilled webs for which no insns were emitted. */
1604 }
1605}
1606
1607/* Before spilling we clear the changed flags for all spilled webs. */
1608
1609static void
1610reset_changed_flag (void)
1611{
1612 struct dlist *d;
1613 for (d = WEBS(SPILLED); d; d = d->next)
1614 DLIST_WEB(d)->changed = 0;
1615}
1616
1617/* The toplevel function for this file. Given a colorized graph,
1618 and lists of spilled, coalesced and colored webs, we add some
1619 spill code. This also sets up the structures for incrementally
1620 building the interference graph in the next pass. */
1621
1622void
1623actual_spill (void)
1624{
1625 int i;
1626 bitmap new_deaths = BITMAP_XMALLOC ();
1627 reset_changed_flag ();
1628 spill_coalprop ();
1629 choose_spill_colors ();
1630 useless_defs = BITMAP_XMALLOC ();
1631 if (flag_ra_improved_spilling)
1632 rewrite_program2 (new_deaths);
1633 else
1634 rewrite_program (new_deaths);
1635 insert_stores (new_deaths);
1636 delete_useless_defs ();
1637 BITMAP_XFREE (useless_defs);
1638 sbitmap_free (insns_with_deaths);
1639 insns_with_deaths = sbitmap_alloc (get_max_uid ());
1640 death_insns_max_uid = get_max_uid ();
1641 sbitmap_zero (insns_with_deaths);
1642 EXECUTE_IF_SET_IN_BITMAP (new_deaths, 0, i,
1643 { SET_BIT (insns_with_deaths, i);});
1644 detect_non_changed_webs ();
1645 detect_web_parts_to_rebuild ();
1646 BITMAP_XFREE (new_deaths);
1647}
1648
1649/* A bitmap of pseudo reg numbers which are coalesced directly
1650 to a hardreg. Set in emit_colors(), used and freed in
1651 remove_suspicious_death_notes(). */
1652static bitmap regnos_coalesced_to_hardregs;
1653
1654/* Create new pseudos for each web we colored, change insns to
1655 use those pseudos and set up ra_reg_renumber. */
1656
1657void
1658emit_colors (struct df *df)
1659{
1660 unsigned int i;
1661 int si;
1662 struct web *web;
1663 int old_max_regno = max_reg_num ();
1664 regset old_regs;
1665 basic_block bb;
1666
1667 /* This bitmap is freed in remove_suspicious_death_notes(),
1668 which is also the user of it. */
1669 regnos_coalesced_to_hardregs = BITMAP_XMALLOC ();
1670 /* First create the (REG xx) rtx's for all webs, as we need to know
1671 the number, to make sure flow has enough memory for them in the
1672 various tables. */
1673 for (i = 0; i < num_webs - num_subwebs; i++)
1674 {
1675 web = ID2WEB (i);
1676 if (web->type != COLORED && web->type != COALESCED)
1677 continue;
1678 if (web->type == COALESCED && alias (web)->type == COLORED)
1679 continue;
1680 if (web->reg_rtx || web->regno < FIRST_PSEUDO_REGISTER)
1681 abort ();
1682
1683 if (web->regno >= max_normal_pseudo)
1684 {
1685 rtx place;
1686 if (web->color == an_unusable_color)
1687 {
1688 unsigned int inherent_size = PSEUDO_REGNO_BYTES (web->regno);
1689 unsigned int total_size = MAX (inherent_size, 0);
1690 place = assign_stack_local (PSEUDO_REGNO_MODE (web->regno),
1691 total_size,
1692 inherent_size == total_size ? 0 : -1);
1693 RTX_UNCHANGING_P (place) =
1694 RTX_UNCHANGING_P (regno_reg_rtx[web->regno]);
1695 set_mem_alias_set (place, new_alias_set ());
1696 }
1697 else
1698 {
1699 place = gen_reg_rtx (PSEUDO_REGNO_MODE (web->regno));
1700 }
1701 web->reg_rtx = place;
1702 }
1703 else
1704 {
1705 /* Special case for i386 'fix_truncdi_nomemory' insn.
1706 We must choose mode from insns not from PSEUDO_REGNO_MODE.
1707 Actually only needed for clobbered registers. */
1708 if (web->num_uses == 0 && web->num_defs == 1)
1709 web->reg_rtx = gen_reg_rtx (GET_MODE (DF_REF_REAL_REG (web->defs[0])));
1710 else
1711 web->reg_rtx = gen_reg_rtx (PSEUDO_REGNO_MODE (web->regno));
1712 /* Remember the different parts directly coalesced to a hardreg. */
1713 if (web->type == COALESCED)
1714 bitmap_set_bit (regnos_coalesced_to_hardregs, REGNO (web->reg_rtx));
1715 }
1716 }
1717 ra_max_regno = max_regno = max_reg_num ();
1718 allocate_reg_info (max_regno, FALSE, FALSE);
703ad42b 1719 ra_reg_renumber = xmalloc (max_regno * sizeof (short));
1720 for (si = 0; si < max_regno; si++)
1721 ra_reg_renumber[si] = -1;
1722
1723 /* Then go through all references, and replace them by a new
1724 pseudoreg for each web. All uses. */
 1725 /* XXX
 1726 Beware: The order of replacements (first uses, then defs) matters only
 1727 for read-mod-write insns, where the RTL expression for the REG is
 1728 shared between def and use. For normal rmw insns we connected all such
 1729 webs, i.e. both the use and the def (which are the same memory)
 1730 get the same new pseudo-reg there, so the order would not matter.
 1731 _However_ we did not connect webs where the read cycle was an
 1732 uninitialized read. If we first replaced the def reference and then
 1733 the use ref, we would leave the shared rtx as a REG which never gets
 1734 initialized and, worse, would overwrite the definition of the other
 1735 REG rtx. So we must replace the defs last.
 1736 */
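  /* Purely illustrative sketch (hypothetical pseudo numbers, not taken from
     any real insn): a read-mod-write insn such as

	(set (reg 70) (plus (reg 70) (const_int 1)))

     may share the single (reg 70) rtx between its def and its use.  If the
     use belongs to an uninitialized-read web that was not connected to the
     def's web, the two webs can receive different new pseudos.  Replacing
     all uses first and all defs last, as the two loops below do, makes the
     def's pseudo the final contents of the shared rtx, so the definition is
     not lost.  */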
1737 for (i = 0; i < df->use_id; i++)
1738 if (df->uses[i])
1739 {
1740 regset rs = DF_REF_BB (df->uses[i])->global_live_at_start;
1741 rtx regrtx;
1742 web = use2web[i];
1743 web = find_web_for_subweb (web);
1744 if (web->type != COLORED && web->type != COALESCED)
1745 continue;
1746 regrtx = alias (web)->reg_rtx;
1747 if (!regrtx)
1748 regrtx = web->reg_rtx;
1749 *DF_REF_REAL_LOC (df->uses[i]) = regrtx;
1750 if (REGNO_REG_SET_P (rs, web->regno) && REG_P (regrtx))
1751 {
1752 /*CLEAR_REGNO_REG_SET (rs, web->regno);*/
1753 SET_REGNO_REG_SET (rs, REGNO (regrtx));
1754 }
1755 }
1756
1757 /* And all defs. */
1758 for (i = 0; i < df->def_id; i++)
1759 {
1760 regset rs;
1761 rtx regrtx;
1762 if (!df->defs[i])
1763 continue;
1764 rs = DF_REF_BB (df->defs[i])->global_live_at_start;
1765 web = def2web[i];
1766 web = find_web_for_subweb (web);
1767 if (web->type != COLORED && web->type != COALESCED)
1768 continue;
1769 regrtx = alias (web)->reg_rtx;
1770 if (!regrtx)
1771 regrtx = web->reg_rtx;
1772 *DF_REF_REAL_LOC (df->defs[i]) = regrtx;
1773 if (REGNO_REG_SET_P (rs, web->regno) && REG_P (regrtx))
1774 {
1775 /* Don't simply clear the current regno, as it might be
1776 replaced by two webs. */
1777 /*CLEAR_REGNO_REG_SET (rs, web->regno);*/
1778 SET_REGNO_REG_SET (rs, REGNO (regrtx));
1779 }
1780 }
1781
1782 /* And now set up the ra_reg_renumber array for reload with all the new
1783 pseudo-regs. */
1784 for (i = 0; i < num_webs - num_subwebs; i++)
1785 {
1786 web = ID2WEB (i);
1787 if (web->reg_rtx && REG_P (web->reg_rtx))
1788 {
1789 int r = REGNO (web->reg_rtx);
1790 ra_reg_renumber[r] = web->color;
1791 ra_debug_msg (DUMP_COLORIZE, "Renumber pseudo %d (== web %d) to %d\n",
1792 r, web->id, ra_reg_renumber[r]);
1793 }
1794 }
1795
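  /* Added descriptive note: the replacement loops above entered the new
     pseudos into the live-at-start sets; the old pseudos are now stale, so
     clear them from both live-at-start and live-at-end below.  */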
1796 old_regs = BITMAP_XMALLOC ();
1797 for (si = FIRST_PSEUDO_REGISTER; si < old_max_regno; si++)
1798 SET_REGNO_REG_SET (old_regs, si);
1799 FOR_EACH_BB (bb)
1800 {
1801 AND_COMPL_REG_SET (bb->global_live_at_start, old_regs);
1802 AND_COMPL_REG_SET (bb->global_live_at_end, old_regs);
1803 }
1804 BITMAP_XFREE (old_regs);
1805}
1806
1807/* Delete some coalesced moves from the insn stream. */
1808
1809void
93bad80e 1810delete_moves (void)
1811{
1812 struct move_list *ml;
1813 struct web *s, *t;
 1814 /* XXX Beware: We would normally test each copy insn here, check whether
 1815 source and target got the same color (either by coalescing or by pure
 1816 luck), and then delete it.
 1817 This currently does not work. One problem is that we don't color
 1818 the regs ourselves, but instead defer to reload. So the colorization
 1819 is only a kind of suggestion, which reload doesn't have to follow.
 1820 For webs which are coalesced to a normally colored web, we only have one
 1821 new pseudo, so in this case we can indeed delete copy insns involving
 1822 those (because even if reload colors them differently from our suggestion,
 1823 it still has to color them the same, as only one pseudo exists). But for
 1824 webs coalesced to precolored ones, we don't have a single pseudo, but
 1825 instead one for each coalesced web. This means that we can't delete
 1826 copy insns where source and target are webs coalesced to precolored
 1827 ones, because that would destroy the connection between both webs. Note
 1828 that this means not only copy insns where one side is the precolored web
 1829 itself, but also those between webs which are coalesced to one color.
 1830 Also, because of reload we can't delete copy insns which involve any
 1831 precolored web at all. These often also have a special meaning (e.g.
 1832 copying the return value of a call to a pseudo, or copying a pseudo to the
 1833 return register), and the deletion would confuse reload into thinking the
 1834 pseudo isn't needed. One of these days reload will go away and we can
 1835 do everything we want.
 1836 In effect, because of the later reload, we can't base our deletion on the
 1837 colors themselves, but instead need to base it on the newly created
 1838 pseudos. */
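  /* Illustrative example only (hypothetical pseudo numbers): for a copy

	(set (reg 71) (reg 70))

     whose source and target webs were coalesced into one ordinary,
     non-precolored web, both sides received the same new pseudo in
     emit_colors(), so the reg_rtx comparison below detects the move as
     redundant and it is deleted.  If either web was instead coalesced to a
     precolored web, the two sides keep distinct pseudos and the copy stays,
     as explained above.  */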
1839 for (ml = wl_moves; ml; ml = ml->next)
1840 /* The real condition we would ideally use is: s->color == t->color.
1841 Additionally: s->type != PRECOLORED && t->type != PRECOLORED, in case
1842 we want to prevent deletion of "special" copies. */
1843 if (ml->move
1844 && (s = alias (ml->move->source_web))->reg_rtx
1845 == (t = alias (ml->move->target_web))->reg_rtx
1846 && s->type != PRECOLORED && t->type != PRECOLORED)
1847 {
1848 basic_block bb = BLOCK_FOR_INSN (ml->move->insn);
1849 df_insn_delete (df, bb, ml->move->insn);
1850 deleted_move_insns++;
1851 deleted_move_cost += bb->frequency + 1;
1852 }
1853}
1854
d55d8fc7 1855/* For reasons documented elsewhere we create different pseudos
 1856 for all webs coalesced to hardregs. For these parts life_analysis()
 1857 might have added REG_DEAD notes without considering that only this part,
 1858 but not the whole coalesced web, dies. The RTL is correct; there is no
 1859 coalescing yet. But if reload's alter_reg() later substitutes the
 1860 hardreg into the REG rtx, it looks as if that particular hardreg dies here,
 1861 although (due to coalescing) it is still live. This might make various
 1862 places in reload think they can use that hardreg for reload regs,
 1863 accidentally overwriting it. So we need to remove those REG_DEAD notes.
 1864 (Or better, teach life_analysis() and reload about our coalescing, but
 1865 that comes later.) Bah. */
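/* Purely illustrative (hypothetical pseudo numbers): if webs A and B were
   both coalesced to hard register 3, they still received separate pseudos,
   say (reg 70) and (reg 71).  life_analysis() may then attach a note
   (REG_DEAD (reg 70)) to the last insn using (reg 70), even though web B,
   and hence hard register 3 once reload's alter_reg() substitutes it, is
   still live at that point.  Notes like this one are removed below
   (conservatively, for every pseudo coalesced to a hardreg).  */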
1866
1867void
93bad80e 1868remove_suspicious_death_notes (void)
1869{
1870 rtx insn;
1871 for (insn = get_insns(); insn; insn = NEXT_INSN (insn))
1872 if (INSN_P (insn))
1873 {
1874 rtx *pnote = &REG_NOTES (insn);
1875 while (*pnote)
1876 {
1877 rtx note = *pnote;
1878 if ((REG_NOTE_KIND (note) == REG_DEAD
1879 || REG_NOTE_KIND (note) == REG_UNUSED)
f8cfc6aa 1880 && (REG_P (XEXP (note, 0))
1881 && bitmap_bit_p (regnos_coalesced_to_hardregs,
1882 REGNO (XEXP (note, 0)))))
1883 *pnote = XEXP (note, 1);
1884 else
1885 pnote = &XEXP (*pnote, 1);
1886 }
1887 }
1888 BITMAP_XFREE (regnos_coalesced_to_hardregs);
1889 regnos_coalesced_to_hardregs = NULL;
1890}
1891
1892/* Allocate space for max_reg_num() pseudo registers, and
1893 fill reg_renumber[] from ra_reg_renumber[]. If FREE_IT
1894 is nonzero, also free ra_reg_renumber and reset ra_max_regno. */
1895
1896void
93bad80e 1897setup_renumber (int free_it)
1898{
1899 int i;
1900 max_regno = max_reg_num ();
1901 allocate_reg_info (max_regno, FALSE, TRUE);
1902 for (i = 0; i < max_regno; i++)
1903 {
1904 reg_renumber[i] = (i < ra_max_regno) ? ra_reg_renumber[i] : -1;
1905 }
1906 if (free_it)
1907 {
1908 free (ra_reg_renumber);
1909 ra_reg_renumber = NULL;
1910 ra_max_regno = 0;
1911 }
1912}
1913
 1914 /* Dump the costs and savings due to spilling, i.e. the cost of added
 1915 spill insns and the savings from removed moves or useless defs. */
1916
1917void
93bad80e 1918dump_cost (unsigned int level)
ed8d2920 1919{
ed8d2920 1920 ra_debug_msg (level, "Instructions for spilling\n added:\n");
1921 ra_debug_msg (level, " loads =%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
1922 emitted_spill_loads, spill_load_cost);
1923 ra_debug_msg (level, " stores=%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
1924 emitted_spill_stores, spill_store_cost);
1925 ra_debug_msg (level, " remat =%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
1926 emitted_remat, spill_remat_cost);
1927 ra_debug_msg (level, " removed:\n moves =%d cost="
1928 HOST_WIDE_INT_PRINT_UNSIGNED "\n",
1929 deleted_move_insns, deleted_move_cost);
1930 ra_debug_msg (level, " others=%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
1931 deleted_def_insns, deleted_def_cost);
1932}
1933
1934/* Initialization of the rewrite phase. */
1935
1936void
93bad80e 1937ra_rewrite_init (void)
1938{
1939 emitted_spill_loads = 0;
1940 emitted_spill_stores = 0;
1941 emitted_remat = 0;
1942 spill_load_cost = 0;
1943 spill_store_cost = 0;
1944 spill_remat_cost = 0;
1945 deleted_move_insns = 0;
1946 deleted_move_cost = 0;
1947 deleted_def_insns = 0;
1948 deleted_def_cost = 0;
1949}
1950
1951/*
1952vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4:
1953*/