/* FIXME: We need to go back and add the warning messages about code
   moved across setjmp.  */


/* Scanning of rtl for dataflow analysis.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.
   Originally contributed by Michael P. Hayes
   (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
   Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
   and Kenneth Zadeck (zadeck@naturalbridge.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "regs.h"
#include "output.h"
#include "alloc-pool.h"
#include "flags.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "sbitmap.h"
#include "bitmap.h"
#include "timevar.h"
#include "tree.h"
#include "target.h"
#include "target-def.h"
#include "df.h"

#ifndef HAVE_epilogue
#define HAVE_epilogue 0
#endif
#ifndef HAVE_prologue
#define HAVE_prologue 0
#endif
#ifndef HAVE_sibcall_epilogue
#define HAVE_sibcall_epilogue 0
#endif

#ifndef EPILOGUE_USES
#define EPILOGUE_USES(REGNO)  0
#endif

/* The bitmap_obstack is used to hold some static variables that
   should not be reset after each function is compiled.  */

static bitmap_obstack persistent_obstack;

/* The set of hard registers in eliminables[i].from.  */

static HARD_REG_SET elim_reg_set;

/* This is a bitmap copy of regs_invalidated_by_call so that we can
   easily add it into bitmaps, etc.  */

bitmap df_invalidated_by_call = NULL;

/* Initialize ur_in and ur_out as if all hard registers were partially
   available.  */

static void df_ref_record (struct dataflow *, rtx, rtx *,
			   basic_block, rtx, enum df_ref_type,
			   enum df_ref_flags, bool record_live);
static void df_def_record_1 (struct dataflow *, rtx, basic_block, rtx,
			     enum df_ref_flags, bool record_live);
static void df_defs_record (struct dataflow *, rtx, basic_block, rtx);
static void df_uses_record (struct dataflow *, rtx *, enum df_ref_type,
			    basic_block, rtx, enum df_ref_flags);

static void df_insn_refs_record (struct dataflow *, basic_block, rtx);
static void df_bb_refs_record (struct dataflow *, basic_block);
static void df_refs_record (struct dataflow *, bitmap);
static struct df_ref *df_ref_create_structure (struct dataflow *, rtx, rtx *,
					       basic_block, rtx,
					       enum df_ref_type,
					       enum df_ref_flags);
static void df_record_entry_block_defs (struct dataflow *);
static void df_record_exit_block_uses (struct dataflow *);
static void df_grow_reg_info (struct dataflow *, struct df_ref_info *);
static void df_grow_ref_info (struct df_ref_info *, unsigned int);
static void df_grow_insn_info (struct df *);


/*----------------------------------------------------------------------------
   SCANNING DATAFLOW PROBLEM

   There are several ways in which scanning looks just like the other
   dataflow problems.  It shares all the mechanisms for local info
   as well as basic block info.  Where it differs is when and how often
   it gets run.  It also has no need for the iterative solver.
----------------------------------------------------------------------------*/

/* Problem data for the scanning dataflow function.  */
struct df_scan_problem_data
{
  alloc_pool ref_pool;
  alloc_pool insn_pool;
  alloc_pool reg_pool;
  alloc_pool mw_reg_pool;
  alloc_pool mw_link_pool;
};

typedef struct df_scan_bb_info *df_scan_bb_info_t;

static void
df_scan_free_internal (struct dataflow *dflow)
{
  struct df *df = dflow->df;
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) dflow->problem_data;

  free (df->def_info.regs);
  free (df->def_info.refs);
  memset (&df->def_info, 0, (sizeof (struct df_ref_info)));

  free (df->use_info.regs);
  free (df->use_info.refs);
  memset (&df->use_info, 0, (sizeof (struct df_ref_info)));

  free (df->insns);
  df->insns = NULL;
  df->insns_size = 0;

  free (dflow->block_info);
  dflow->block_info = NULL;
  dflow->block_info_size = 0;

  BITMAP_FREE (df->hardware_regs_used);
  BITMAP_FREE (df->entry_block_defs);
  BITMAP_FREE (df->exit_block_uses);

  free_alloc_pool (dflow->block_pool);
  free_alloc_pool (problem_data->ref_pool);
  free_alloc_pool (problem_data->insn_pool);
  free_alloc_pool (problem_data->reg_pool);
  free_alloc_pool (problem_data->mw_reg_pool);
  free_alloc_pool (problem_data->mw_link_pool);
}


/* Get basic block info.  */

struct df_scan_bb_info *
df_scan_get_bb_info (struct dataflow *dflow, unsigned int index)
{
  gcc_assert (index < dflow->block_info_size);
  return (struct df_scan_bb_info *) dflow->block_info[index];
}


/* Set basic block info.  */

static void
df_scan_set_bb_info (struct dataflow *dflow, unsigned int index,
		     struct df_scan_bb_info *bb_info)
{
  gcc_assert (index < dflow->block_info_size);
  dflow->block_info[index] = (void *) bb_info;
}


/* Free basic block info.  */

static void
df_scan_free_bb_info (struct dataflow *dflow, basic_block bb, void *vbb_info)
{
  struct df_scan_bb_info *bb_info = (struct df_scan_bb_info *) vbb_info;
  if (bb_info)
    {
      df_bb_refs_delete (dflow, bb->index);
      pool_free (dflow->block_pool, bb_info);
    }
}


/* Allocate the problem data for the scanning problem.  This should be
   called when the problem is created or when the entire function is to
   be rescanned.  */

static void
df_scan_alloc (struct dataflow *dflow, bitmap blocks_to_rescan,
	       bitmap all_blocks ATTRIBUTE_UNUSED)
{
  struct df *df = dflow->df;
  struct df_scan_problem_data *problem_data;
  unsigned int insn_num = get_max_uid () + 1;
  unsigned int block_size = 50;
  unsigned int bb_index;
  bitmap_iterator bi;

  /* Given the number of pools, this is really faster than tearing
     everything apart.  */
  if (dflow->problem_data)
    df_scan_free_internal (dflow);

  dflow->block_pool
    = create_alloc_pool ("df_scan_block pool",
			 sizeof (struct df_scan_bb_info),
			 block_size);

  problem_data = XNEW (struct df_scan_problem_data);
  dflow->problem_data = problem_data;

  problem_data->ref_pool
    = create_alloc_pool ("df_scan_ref pool",
			 sizeof (struct df_ref), block_size);
  problem_data->insn_pool
    = create_alloc_pool ("df_scan_insn pool",
			 sizeof (struct df_insn_info), block_size);
  problem_data->reg_pool
    = create_alloc_pool ("df_scan_reg pool",
			 sizeof (struct df_reg_info), block_size);
  problem_data->mw_reg_pool
    = create_alloc_pool ("df_scan_mw_reg pool",
			 sizeof (struct df_mw_hardreg), block_size);
  problem_data->mw_link_pool
    = create_alloc_pool ("df_scan_mw_link pool",
			 sizeof (struct df_link), block_size);

  insn_num += insn_num / 4;
  df_grow_reg_info (dflow, &df->def_info);
  df_grow_ref_info (&df->def_info, insn_num);

  df_grow_reg_info (dflow, &df->use_info);
  df_grow_ref_info (&df->use_info, insn_num * 2);

  df_grow_insn_info (df);
  df_grow_bb_info (dflow);

  EXECUTE_IF_SET_IN_BITMAP (blocks_to_rescan, 0, bb_index, bi)
    {
      struct df_scan_bb_info *bb_info = df_scan_get_bb_info (dflow, bb_index);
      if (!bb_info)
	{
	  bb_info = (struct df_scan_bb_info *) pool_alloc (dflow->block_pool);
	  df_scan_set_bb_info (dflow, bb_index, bb_info);
	}
      bb_info->artificial_defs = NULL;
      bb_info->artificial_uses = NULL;
    }

  df->hardware_regs_used = BITMAP_ALLOC (NULL);
  df->entry_block_defs = BITMAP_ALLOC (NULL);
  df->exit_block_uses = BITMAP_ALLOC (NULL);
}


/* Free all of the data associated with the scan problem.  */

static void
df_scan_free (struct dataflow *dflow)
{
  struct df *df = dflow->df;

  if (dflow->problem_data)
    {
      df_scan_free_internal (dflow);
      free (dflow->problem_data);
    }

  if (df->blocks_to_scan)
    BITMAP_FREE (df->blocks_to_scan);

  if (df->blocks_to_analyze)
    BITMAP_FREE (df->blocks_to_analyze);

  free (dflow);
}

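/* Dump out the global scanning information (the bitmaps of registers
   tracked by the scanner) to FILE for debugging.  */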
static void
df_scan_dump (struct dataflow *dflow ATTRIBUTE_UNUSED, FILE *file ATTRIBUTE_UNUSED)
{
  struct df *df = dflow->df;
  int i;

  fprintf (file, "  invalidated by call \t");
  dump_bitmap (file, df_invalidated_by_call);
  fprintf (file, "  hardware regs used \t");
  dump_bitmap (file, df->hardware_regs_used);
  fprintf (file, "  entry block defs \t");
  dump_bitmap (file, df->entry_block_defs);
  fprintf (file, "  exit block uses \t");
  dump_bitmap (file, df->exit_block_uses);
  fprintf (file, "  regs ever live \t");
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (regs_ever_live[i])
      fprintf (file, "%d ", i);
  fprintf (file, "\n");
}

static struct df_problem problem_SCAN =
{
  DF_SCAN,                    /* Problem id.  */
  DF_NONE,                    /* Direction.  */
  df_scan_alloc,              /* Allocate the problem specific data.  */
  NULL,                       /* Reset global information.  */
  df_scan_free_bb_info,       /* Free basic block info.  */
  NULL,                       /* Local compute function.  */
  NULL,                       /* Init the solution specific data.  */
  NULL,                       /* Iterative solver.  */
  NULL,                       /* Confluence operator 0.  */
  NULL,                       /* Confluence operator n.  */
  NULL,                       /* Transfer function.  */
  NULL,                       /* Finalize function.  */
  df_scan_free,               /* Free all of the problem information.  */
  df_scan_dump,               /* Debugging.  */
  NULL,                       /* Dependent problem.  */
  0                           /* Changeable flags.  */
};


/* Create a new DATAFLOW instance and add it to an existing instance
   of DF.  The returned structure is what is used to get at the
   solution.  */

struct dataflow *
df_scan_add_problem (struct df *df, int flags)
{
  return df_add_problem (df, &problem_SCAN, flags);
}

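/* A minimal usage sketch, for orientation only (this is not code from
   this file): a pass instantiates the dataflow framework, registers
   the scanning problem, scans the blocks, and then solves whatever
   other problems were registered.  df_init, df_rescan_blocks and
   df_analyze are the companion entry points in df-core.c; treat the
   exact flag arguments below as illustrative assumptions.

       struct df *df = df_init (DF_HARD_REGS);
       df_scan_add_problem (df, 0);
       df_rescan_blocks (df, NULL);
       df_analyze (df);
*/
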
/*----------------------------------------------------------------------------
   Storage Allocation Utilities
----------------------------------------------------------------------------*/


/* First, grow the reg_info information.  If the current size is less than
   the number of pseudos, grow to 25% more than the number of
   pseudos.

   Second, ensure that all of the slots up to max_reg_num have been
   filled with reg_info structures.  */

static void
df_grow_reg_info (struct dataflow *dflow, struct df_ref_info *ref_info)
{
  unsigned int max_reg = max_reg_num ();
  unsigned int new_size = max_reg;
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) dflow->problem_data;
  unsigned int i;

  if (ref_info->regs_size < new_size)
    {
      new_size += new_size / 4;
      ref_info->regs = xrealloc (ref_info->regs,
				 new_size * sizeof (struct df_reg_info *));
      ref_info->regs_size = new_size;
    }

  for (i = ref_info->regs_inited; i < max_reg; i++)
    {
      struct df_reg_info *reg_info = pool_alloc (problem_data->reg_pool);
      memset (reg_info, 0, sizeof (struct df_reg_info));
      ref_info->regs[i] = reg_info;
    }

  ref_info->regs_inited = max_reg;
}


/* Grow the ref information.  */

static void
df_grow_ref_info (struct df_ref_info *ref_info, unsigned int new_size)
{
  if (ref_info->refs_size < new_size)
    {
      ref_info->refs = xrealloc (ref_info->refs,
				 new_size * sizeof (struct df_ref *));
      memset (ref_info->refs + ref_info->refs_size, 0,
	      (new_size - ref_info->refs_size) * sizeof (struct df_ref *));
      ref_info->refs_size = new_size;
    }
}


/* Grow the ref information.  If the current size is less than the
   number of instructions, grow to 25% more than the number of
   instructions.  */

static void
df_grow_insn_info (struct df *df)
{
  unsigned int new_size = get_max_uid () + 1;
  if (df->insns_size < new_size)
    {
      new_size += new_size / 4;
      df->insns = xrealloc (df->insns,
			    new_size * sizeof (struct df_insn_info *));
      memset (df->insns + df->insns_size, 0,
	      (new_size - df->insns_size) * sizeof (struct df_insn_info *));
      df->insns_size = new_size;
    }
}



/*----------------------------------------------------------------------------
   PUBLIC INTERFACES FOR SMALL GRAIN CHANGES TO SCANNING.
----------------------------------------------------------------------------*/

/* Rescan some BLOCKS, or all the blocks defined by the last call to
   df_set_blocks if BLOCKS is NULL.  */

void
df_rescan_blocks (struct df *df, bitmap blocks)
{
  bitmap local_blocks_to_scan = BITMAP_ALLOC (NULL);

  struct dataflow *dflow = df->problems_by_index[DF_SCAN];
  basic_block bb;

  df->def_info.refs_organized_size = 0;
  df->use_info.refs_organized_size = 0;

  if (blocks)
    {
      int i;
      unsigned int bb_index;
      bitmap_iterator bi;
      bool cleared_bits = false;

      /* Need to ensure that there is space in all of the tables.  */
      unsigned int insn_num = get_max_uid () + 1;
      insn_num += insn_num / 4;

      df_grow_reg_info (dflow, &df->def_info);
      df_grow_ref_info (&df->def_info, insn_num);

      df_grow_reg_info (dflow, &df->use_info);
      df_grow_ref_info (&df->use_info, insn_num * 2);

      df_grow_insn_info (df);
      df_grow_bb_info (dflow);

      bitmap_copy (local_blocks_to_scan, blocks);

      EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
	{
	  basic_block bb = BASIC_BLOCK (bb_index);
	  if (!bb)
	    {
	      bitmap_clear_bit (local_blocks_to_scan, bb_index);
	      cleared_bits = true;
	    }
	}

      if (cleared_bits)
	bitmap_copy (blocks, local_blocks_to_scan);

      df->def_info.add_refs_inline = true;
      df->use_info.add_refs_inline = true;

      for (i = df->num_problems_defined; i; i--)
	{
	  bitmap blocks_to_reset = NULL;
	  if (dflow->problem->reset_fun)
	    {
	      if (!blocks_to_reset)
		{
		  blocks_to_reset = BITMAP_ALLOC (NULL);
		  bitmap_copy (blocks_to_reset, local_blocks_to_scan);
		  if (df->blocks_to_scan)
		    bitmap_ior_into (blocks_to_reset, df->blocks_to_scan);
		}
	      dflow->problem->reset_fun (dflow, blocks_to_reset);
	    }
	  if (blocks_to_reset)
	    BITMAP_FREE (blocks_to_reset);
	}

      df_refs_delete (dflow, local_blocks_to_scan);

      /* This may be a mistake, but if an explicit set of blocks is
	 passed in and the set of blocks to analyze has been explicitly
	 set, add the extra blocks to blocks_to_analyze.  The
	 alternative is to put an assert here.  We do not want this to
	 just go by silently or else we may get storage leaks.  */
      if (df->blocks_to_analyze)
	bitmap_ior_into (df->blocks_to_analyze, blocks);
    }
  else
    {
      /* If we are going to do everything, just reallocate everything.
	 Most stuff is allocated in pools so this is faster than
	 walking it.  */
      if (df->blocks_to_analyze)
	bitmap_copy (local_blocks_to_scan, df->blocks_to_analyze);
      else
	FOR_ALL_BB (bb)
	  {
	    bitmap_set_bit (local_blocks_to_scan, bb->index);
	  }
      df_scan_alloc (dflow, local_blocks_to_scan, NULL);

      df->def_info.add_refs_inline = false;
      df->use_info.add_refs_inline = false;
    }

  df_refs_record (dflow, local_blocks_to_scan);
#if 0
  bitmap_print (stderr, local_blocks_to_scan, "scanning: ", "\n");
#endif

  if (!df->blocks_to_scan)
    df->blocks_to_scan = BITMAP_ALLOC (NULL);

  bitmap_ior_into (df->blocks_to_scan, local_blocks_to_scan);
  BITMAP_FREE (local_blocks_to_scan);
}

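/* An illustrative incremental-use sketch (an assumption about caller
   style, not code from this file): a pass that has modified the insns
   in a single block can rescan just that block rather than the whole
   function.

       bitmap dirty = BITMAP_ALLOC (NULL);
       bitmap_set_bit (dirty, bb->index);
       df_rescan_blocks (df, dirty);
       BITMAP_FREE (dirty);
*/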

/* Create a new ref of type DF_REF_TYPE for register REG at address
   LOC within INSN of BB.  */

struct df_ref *
df_ref_create (struct df *df, rtx reg, rtx *loc, rtx insn,
	       basic_block bb,
	       enum df_ref_type ref_type,
	       enum df_ref_flags ref_flags)
{
  struct dataflow *dflow = df->problems_by_index[DF_SCAN];
  struct df_scan_bb_info *bb_info;

  df_grow_reg_info (dflow, &df->use_info);
  df_grow_reg_info (dflow, &df->def_info);
  df_grow_bb_info (dflow);

  /* Make sure there is the bb_info for this block.  */
  bb_info = df_scan_get_bb_info (dflow, bb->index);
  if (!bb_info)
    {
      bb_info = (struct df_scan_bb_info *) pool_alloc (dflow->block_pool);
      df_scan_set_bb_info (dflow, bb->index, bb_info);
      bb_info->artificial_defs = NULL;
      bb_info->artificial_uses = NULL;
    }

  if (ref_type == DF_REF_REG_DEF)
    df->def_info.add_refs_inline = true;
  else
    df->use_info.add_refs_inline = true;

  return df_ref_create_structure (dflow, reg, loc, bb, insn, ref_type, ref_flags);
}



/*----------------------------------------------------------------------------
   UTILITIES TO CREATE AND DESTROY REFS AND CHAINS.
----------------------------------------------------------------------------*/


/* Get the artificial defs for a basic block.  */

struct df_ref *
df_get_artificial_defs (struct df *df, unsigned int bb_index)
{
  struct dataflow *dflow = df->problems_by_index[DF_SCAN];
  return df_scan_get_bb_info (dflow, bb_index)->artificial_defs;
}


/* Get the artificial uses for a basic block.  */

struct df_ref *
df_get_artificial_uses (struct df *df, unsigned int bb_index)
{
  struct dataflow *dflow = df->problems_by_index[DF_SCAN];
  return df_scan_get_bb_info (dflow, bb_index)->artificial_uses;
}


/* Link REF at the front of reg_use or reg_def chain for REGNO.  */

void
df_reg_chain_create (struct df_reg_info *reg_info,
		     struct df_ref *ref)
{
  struct df_ref *head = reg_info->reg_chain;
  reg_info->reg_chain = ref;

  DF_REF_NEXT_REG (ref) = head;

  /* We cannot actually link to the head of the chain.  */
  DF_REF_PREV_REG (ref) = NULL;

  if (head)
    DF_REF_PREV_REG (head) = ref;
}

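/* For example, if the chain for REGNO is currently A <-> B and REF is
   R, the chain becomes R <-> A <-> B: R is the new head, R's prev
   pointer is NULL, and A's prev pointer now points at R.  */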

/* Remove REF from the CHAIN.  Return the head of the chain.  This
   will be CHAIN unless the REF was at the beginning of the chain.  */

static struct df_ref *
df_ref_unlink (struct df_ref *chain, struct df_ref *ref)
{
  struct df_ref *orig_chain = chain;
  struct df_ref *prev = NULL;
  while (chain)
    {
      if (chain == ref)
	{
	  if (prev)
	    {
	      prev->next_ref = ref->next_ref;
	      ref->next_ref = NULL;
	      return orig_chain;
	    }
	  else
	    {
	      chain = ref->next_ref;
	      ref->next_ref = NULL;
	      return chain;
	    }
	}

      prev = chain;
      chain = chain->next_ref;
    }

  /* Someone passed in a ref that was not in the chain.  */
  gcc_unreachable ();
  return NULL;
}


/* Unlink and delete REF from the reg_use or reg_def chain.  Also
   delete the def-use or use-def chain if it exists.  Returns the next
   ref in the uses or defs chain.  */

struct df_ref *
df_reg_chain_unlink (struct dataflow *dflow, struct df_ref *ref)
{
  struct df *df = dflow->df;
  struct df_ref *next = DF_REF_NEXT_REG (ref);
  struct df_ref *prev = DF_REF_PREV_REG (ref);
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) dflow->problem_data;
  struct df_reg_info *reg_info;
  struct df_ref *next_ref = ref->next_ref;
  unsigned int id = DF_REF_ID (ref);

  if (DF_REF_TYPE (ref) == DF_REF_REG_DEF)
    {
      reg_info = DF_REG_DEF_GET (df, DF_REF_REGNO (ref));
      df->def_info.bitmap_size--;
      if (df->def_info.refs && (id < df->def_info.refs_size))
	DF_DEFS_SET (df, id, NULL);
    }
  else
    {
      reg_info = DF_REG_USE_GET (df, DF_REF_REGNO (ref));
      df->use_info.bitmap_size--;
      if (df->use_info.refs && (id < df->use_info.refs_size))
	DF_USES_SET (df, id, NULL);
    }

  /* Delete any def-use or use-def chains that start here.  */
  if (DF_REF_CHAIN (ref))
    df_chain_unlink (df->problems_by_index[DF_CHAIN], ref, NULL);

  reg_info->n_refs--;

  /* Unlink from the reg chain.  If there is no prev, this is the
     first of the list.  If not, just join the next and prev.  */
  if (prev)
    {
      DF_REF_NEXT_REG (prev) = next;
      if (next)
	DF_REF_PREV_REG (next) = prev;
    }
  else
    {
      reg_info->reg_chain = next;
      if (next)
	DF_REF_PREV_REG (next) = NULL;
    }

  pool_free (problem_data->ref_pool, ref);
  return next_ref;
}


/* Unlink REF from all def-use/use-def chains, etc.  */

void
df_ref_remove (struct df *df, struct df_ref *ref)
{
  struct dataflow *dflow = df->problems_by_index[DF_SCAN];
  if (DF_REF_REG_DEF_P (ref))
    {
      if (DF_REF_FLAGS (ref) & DF_REF_ARTIFICIAL)
	{
	  struct df_scan_bb_info *bb_info
	    = df_scan_get_bb_info (dflow, DF_REF_BB (ref)->index);
	  bb_info->artificial_defs
	    = df_ref_unlink (bb_info->artificial_defs, ref);
	}
      else
	DF_INSN_UID_DEFS (df, DF_REF_INSN_UID (ref))
	  = df_ref_unlink (DF_INSN_UID_DEFS (df, DF_REF_INSN_UID (ref)), ref);

      if (df->def_info.add_refs_inline)
	DF_DEFS_SET (df, DF_REF_ID (ref), NULL);
    }
  else
    {
      if (DF_REF_FLAGS (ref) & DF_REF_ARTIFICIAL)
	{
	  struct df_scan_bb_info *bb_info
	    = df_scan_get_bb_info (dflow, DF_REF_BB (ref)->index);
	  bb_info->artificial_uses
	    = df_ref_unlink (bb_info->artificial_uses, ref);
	}
      else
	DF_INSN_UID_USES (df, DF_REF_INSN_UID (ref))
	  = df_ref_unlink (DF_INSN_UID_USES (df, DF_REF_INSN_UID (ref)), ref);

      if (df->use_info.add_refs_inline)
	DF_USES_SET (df, DF_REF_ID (ref), NULL);
    }

  df_reg_chain_unlink (dflow, ref);
}


/* Create the insn record for INSN.  If there was one there, zero it out.  */

static struct df_insn_info *
df_insn_create_insn_record (struct dataflow *dflow, rtx insn)
{
  struct df *df = dflow->df;
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) dflow->problem_data;

  struct df_insn_info *insn_rec = DF_INSN_GET (df, insn);
  if (!insn_rec)
    {
      insn_rec = pool_alloc (problem_data->insn_pool);
      DF_INSN_SET (df, insn, insn_rec);
    }
  memset (insn_rec, 0, sizeof (struct df_insn_info));

  return insn_rec;
}


/* Delete all of the refs information from INSN.  */

void
df_insn_refs_delete (struct dataflow *dflow, rtx insn)
{
  struct df *df = dflow->df;
  unsigned int uid = INSN_UID (insn);
  struct df_insn_info *insn_info = NULL;
  struct df_ref *ref;
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) dflow->problem_data;

  if (uid < df->insns_size)
    insn_info = DF_INSN_UID_GET (df, uid);

  if (insn_info)
    {
      struct df_mw_hardreg *hardregs = insn_info->mw_hardregs;

      while (hardregs)
	{
	  struct df_mw_hardreg *next_hr = hardregs->next;
	  struct df_link *link = hardregs->regs;
	  while (link)
	    {
	      struct df_link *next_l = link->next;
	      pool_free (problem_data->mw_link_pool, link);
	      link = next_l;
	    }

	  pool_free (problem_data->mw_reg_pool, hardregs);
	  hardregs = next_hr;
	}

      ref = insn_info->defs;
      while (ref)
	ref = df_reg_chain_unlink (dflow, ref);

      ref = insn_info->uses;
      while (ref)
	ref = df_reg_chain_unlink (dflow, ref);

      pool_free (problem_data->insn_pool, insn_info);
      DF_INSN_SET (df, insn, NULL);
    }
}


/* Delete all of the refs information from basic_block with BB_INDEX.  */

void
df_bb_refs_delete (struct dataflow *dflow, int bb_index)
{
  struct df_ref *def;
  struct df_ref *use;

  struct df_scan_bb_info *bb_info
    = df_scan_get_bb_info (dflow, bb_index);
  rtx insn;
  basic_block bb = BASIC_BLOCK (bb_index);
  FOR_BB_INSNS (bb, insn)
    {
      if (INSN_P (insn))
	{
	  /* Delete the refs within INSN.  */
	  df_insn_refs_delete (dflow, insn);
	}
    }

  /* Get rid of any artificial uses or defs.  */
  if (bb_info)
    {
      def = bb_info->artificial_defs;
      while (def)
	def = df_reg_chain_unlink (dflow, def);
      bb_info->artificial_defs = NULL;
      use = bb_info->artificial_uses;
      while (use)
	use = df_reg_chain_unlink (dflow, use);
      bb_info->artificial_uses = NULL;
    }
}


/* Delete all of the refs information from BLOCKS.  */

void
df_refs_delete (struct dataflow *dflow, bitmap blocks)
{
  bitmap_iterator bi;
  unsigned int bb_index;

  EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
    {
      df_bb_refs_delete (dflow, bb_index);
    }
}


/* Build the ref table for either the uses or defs from the reg-use
   or reg-def chains.  */

void
df_reorganize_refs (struct df_ref_info *ref_info)
{
  unsigned int m = ref_info->regs_inited;
  unsigned int regno;
  unsigned int offset = 0;
  unsigned int size = 0;

  if (ref_info->refs_organized_size)
    return;

  if (ref_info->refs_size < ref_info->bitmap_size)
    {
      int new_size = ref_info->bitmap_size + ref_info->bitmap_size / 4;
      df_grow_ref_info (ref_info, new_size);
    }

  for (regno = 0; regno < m; regno++)
    {
      struct df_reg_info *reg_info = ref_info->regs[regno];
      int count = 0;
      if (reg_info)
	{
	  struct df_ref *ref = reg_info->reg_chain;
	  reg_info->begin = offset;
	  while (ref)
	    {
	      ref_info->refs[offset] = ref;
	      DF_REF_ID (ref) = offset++;
	      ref = DF_REF_NEXT_REG (ref);
	      count++;
	      size++;
	    }
	  reg_info->n_refs = count;
	}
    }

  /* The bitmap size is not decremented when refs are deleted.  So
     reset it now that we have squished out all of the empty
     slots.  */
  ref_info->bitmap_size = size;
  ref_info->refs_organized_size = size;
  ref_info->add_refs_inline = true;
}

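/* After reorganization the refs array is grouped by register number.
   For example, if reg 1 has two refs and reg 2 has one, the array
   holds [r1.ref0, r1.ref1, r2.ref0]; each ref's DF_REF_ID is its
   index in that array, and reg_info->begin marks where each
   register's run starts.  (Illustrative description, not code.)  */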

/*----------------------------------------------------------------------------
   Hard core instruction scanning code.  No external interfaces here,
   just a lot of routines that look inside insns.
----------------------------------------------------------------------------*/

/* Create a ref and add it to the reg-def or reg-use chains.  */

static struct df_ref *
df_ref_create_structure (struct dataflow *dflow, rtx reg, rtx *loc,
			 basic_block bb, rtx insn,
			 enum df_ref_type ref_type,
			 enum df_ref_flags ref_flags)
{
  struct df_ref *this_ref;
  struct df *df = dflow->df;
  int regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) dflow->problem_data;

  this_ref = pool_alloc (problem_data->ref_pool);
  DF_REF_REG (this_ref) = reg;
  DF_REF_REGNO (this_ref) = regno;
  DF_REF_LOC (this_ref) = loc;
  DF_REF_INSN (this_ref) = insn;
  DF_REF_CHAIN (this_ref) = NULL;
  DF_REF_TYPE (this_ref) = ref_type;
  DF_REF_FLAGS (this_ref) = ref_flags;
  DF_REF_DATA (this_ref) = NULL;
  DF_REF_BB (this_ref) = bb;

  /* Link the ref into the reg_def and reg_use chains and keep a count
     of the instances.  */
  switch (ref_type)
    {
    case DF_REF_REG_DEF:
      {
	struct df_reg_info *reg_info = DF_REG_DEF_GET (df, regno);
	unsigned int size = df->def_info.refs_organized_size
			    ? df->def_info.refs_organized_size
			    : df->def_info.bitmap_size;

	/* Add the ref to the reg_def chain.  */
	reg_info->n_refs++;
	df_reg_chain_create (reg_info, this_ref);
	DF_REF_ID (this_ref) = size;
	if (df->def_info.add_refs_inline)
	  {
	    if (size >= df->def_info.refs_size)
	      {
		int new_size = size + size / 4;
		df_grow_ref_info (&df->def_info, new_size);
	      }
	    /* Add the ref to the big array of defs.  */
	    DF_DEFS_SET (df, size, this_ref);
	    if (df->def_info.refs_organized_size)
	      df->def_info.refs_organized_size++;
	  }

	df->def_info.bitmap_size++;

	if (DF_REF_FLAGS (this_ref) & DF_REF_ARTIFICIAL)
	  {
	    struct df_scan_bb_info *bb_info
	      = df_scan_get_bb_info (dflow, bb->index);
	    this_ref->next_ref = bb_info->artificial_defs;
	    bb_info->artificial_defs = this_ref;
	  }
	else
	  {
	    this_ref->next_ref = DF_INSN_GET (df, insn)->defs;
	    DF_INSN_GET (df, insn)->defs = this_ref;
	  }
      }
      break;

    case DF_REF_REG_MEM_LOAD:
    case DF_REF_REG_MEM_STORE:
    case DF_REF_REG_USE:
      {
	struct df_reg_info *reg_info = DF_REG_USE_GET (df, regno);
	unsigned int size = df->use_info.refs_organized_size
			    ? df->use_info.refs_organized_size
			    : df->use_info.bitmap_size;

	/* Add the ref to the reg_use chain.  */
	reg_info->n_refs++;
	df_reg_chain_create (reg_info, this_ref);
	DF_REF_ID (this_ref) = size;
	if (df->use_info.add_refs_inline)
	  {
	    if (size >= df->use_info.refs_size)
	      {
		int new_size = size + size / 4;
		df_grow_ref_info (&df->use_info, new_size);
	      }
	    /* Add the ref to the big array of uses.  */
	    DF_USES_SET (df, size, this_ref);
	    if (df->use_info.refs_organized_size)
	      df->use_info.refs_organized_size++;
	  }

	df->use_info.bitmap_size++;
	if (DF_REF_FLAGS (this_ref) & DF_REF_ARTIFICIAL)
	  {
	    struct df_scan_bb_info *bb_info
	      = df_scan_get_bb_info (dflow, bb->index);
	    this_ref->next_ref = bb_info->artificial_uses;
	    bb_info->artificial_uses = this_ref;
	  }
	else
	  {
	    this_ref->next_ref = DF_INSN_GET (df, insn)->uses;
	    DF_INSN_GET (df, insn)->uses = this_ref;
	  }
      }
      break;

    default:
      gcc_unreachable ();
    }
  return this_ref;
}


/* Create new references of type DF_REF_TYPE for each part of register REG
   at address LOC within INSN of BB.  */

static void
df_ref_record (struct dataflow *dflow, rtx reg, rtx *loc,
	       basic_block bb, rtx insn,
	       enum df_ref_type ref_type,
	       enum df_ref_flags ref_flags,
	       bool record_live)
{
  struct df *df = dflow->df;
  rtx oldreg = reg;
  unsigned int regno;

  gcc_assert (REG_P (reg) || GET_CODE (reg) == SUBREG);

  /* For the reg allocator we are interested in some SUBREG rtx's, but not
     all.  Notably only those representing a word extraction from a multi-word
     reg.  As written in the documentation those should have the form
     (subreg:SI (reg:M A) N), with size(SImode) > size(Mmode).
     XXX Is that true?  We could also use the global word_mode variable.  */
  if ((dflow->flags & DF_SUBREGS) == 0
      && GET_CODE (reg) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (reg)) < GET_MODE_SIZE (word_mode)
	  || GET_MODE_SIZE (GET_MODE (reg))
	       >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (reg)))))
    {
      loc = &SUBREG_REG (reg);
      reg = *loc;
      ref_flags |= DF_REF_STRIPPED;
    }

  regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      unsigned int i;
      unsigned int endregno;
      struct df_mw_hardreg *hardreg = NULL;
      struct df_scan_problem_data *problem_data
	= (struct df_scan_problem_data *) dflow->problem_data;

      if (!(dflow->flags & DF_HARD_REGS))
	return;

      if (GET_CODE (reg) == SUBREG)
	{
	  regno += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)),
					SUBREG_BYTE (reg), GET_MODE (reg));
	  endregno = subreg_nregs (reg);
	}
      else
	endregno = hard_regno_nregs[regno][GET_MODE (reg)];
      endregno += regno;

      /* If this is a multiword hardreg, we create some extra data
	 structures that will enable us to easily build REG_DEAD and
	 REG_UNUSED notes.  */
      if ((endregno != regno + 1) && insn)
	{
	  struct df_insn_info *insn_info = DF_INSN_GET (df, insn);
	  /* Sets to a subreg of a multiword register are partial.
	     Sets to a non-subreg of a multiword register are not.  */
	  if (GET_CODE (oldreg) == SUBREG)
	    ref_flags |= DF_REF_PARTIAL;
	  ref_flags |= DF_REF_MW_HARDREG;
	  hardreg = pool_alloc (problem_data->mw_reg_pool);
	  hardreg->next = insn_info->mw_hardregs;
	  insn_info->mw_hardregs = hardreg;
	  hardreg->type = ref_type;
	  hardreg->flags = ref_flags;
	  hardreg->mw_reg = reg;
	  hardreg->regs = NULL;
	}

      for (i = regno; i < endregno; i++)
	{
	  struct df_ref *ref;

	  /* Calls are handled at call site because regs_ever_live
	     doesn't include clobbered regs, only used ones.  */
	  if (ref_type == DF_REF_REG_DEF && record_live)
	    regs_ever_live[i] = 1;
	  else if ((ref_type == DF_REF_REG_USE
		    || ref_type == DF_REF_REG_MEM_STORE
		    || ref_type == DF_REF_REG_MEM_LOAD)
		   && ((ref_flags & DF_REF_ARTIFICIAL) == 0))
	    {
	      /* Set regs_ever_live on uses of non-eliminable frame
		 pointers and arg pointers.  */
	      if (!(TEST_HARD_REG_BIT (elim_reg_set, regno)
		    && (regno == FRAME_POINTER_REGNUM
			|| regno == ARG_POINTER_REGNUM)))
		regs_ever_live[i] = 1;
	    }

	  ref = df_ref_create_structure (dflow, regno_reg_rtx[i], loc,
					 bb, insn, ref_type, ref_flags);
	  if (hardreg)
	    {
	      struct df_link *link = pool_alloc (problem_data->mw_link_pool);

	      link->next = hardreg->regs;
	      link->ref = ref;
	      hardreg->regs = link;
	    }
	}
    }
  else
    {
      df_ref_create_structure (dflow, reg, loc,
			       bb, insn, ref_type, ref_flags);
    }
}


/* A set to a non-paradoxical SUBREG for which the number of word_mode units
   covered by the outer mode is smaller than that covered by the inner mode,
   is a read-modify-write operation.
   This function returns true iff the SUBREG X is such a SUBREG.  */

bool
df_read_modify_subreg_p (rtx x)
{
  unsigned int isize, osize;
  if (GET_CODE (x) != SUBREG)
    return false;
  isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
  osize = GET_MODE_SIZE (GET_MODE (x));
  return (isize > osize && isize > UNITS_PER_WORD);
}

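/* For example, on a target where UNITS_PER_WORD is 4, a set of
   (subreg:SI (reg:DI x) 0) has isize == 8 and osize == 4: only one of
   the two words of X is written, so the set is a read-modify-write of
   X and the function returns true.  A paradoxical or same-size subreg
   (say (subreg:DI (reg:SI y) 0)) fails the isize > osize test and
   yields false.  */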

/* Process all the registers defined in the rtx, X.
   Autoincrement/decrement definitions will be picked up by
   df_uses_record.  */

static void
df_def_record_1 (struct dataflow *dflow, rtx x,
		 basic_block bb, rtx insn,
		 enum df_ref_flags flags, bool record_live)
{
  rtx *loc;
  rtx dst;
  bool dst_in_strict_lowpart = false;

  /* We may recursively call ourselves on EXPR_LIST when dealing with a
     PARALLEL construct.  */
  if (GET_CODE (x) == EXPR_LIST || GET_CODE (x) == CLOBBER)
    loc = &XEXP (x, 0);
  else
    loc = &SET_DEST (x);
  dst = *loc;

  /* It is legal to have a set destination be a parallel.  */
  if (GET_CODE (dst) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dst, 0) - 1; i >= 0; i--)
	{
	  rtx temp = XVECEXP (dst, 0, i);
	  if (GET_CODE (temp) == EXPR_LIST || GET_CODE (temp) == CLOBBER
	      || GET_CODE (temp) == SET)
	    df_def_record_1 (dflow, temp, bb, insn,
			     GET_CODE (temp) == CLOBBER
			     ? flags | DF_REF_MUST_CLOBBER : flags,
			     record_live);
	}
      return;
    }

  /* Maybe we should flag the use of STRICT_LOW_PART somehow.  It might
     be handy for the reg allocator.  */
  while (GET_CODE (dst) == STRICT_LOW_PART
	 || GET_CODE (dst) == ZERO_EXTRACT
	 || df_read_modify_subreg_p (dst))
    {
#if 0
      /* Strict low part always contains SUBREG, but we do not want to make
	 it appear outside, as whole register is always considered.  */
      if (GET_CODE (dst) == STRICT_LOW_PART)
	{
	  loc = &XEXP (dst, 0);
	  dst = *loc;
	}
#endif
      loc = &XEXP (dst, 0);
      if (GET_CODE (dst) == STRICT_LOW_PART)
	dst_in_strict_lowpart = true;
      dst = *loc;
      flags |= DF_REF_READ_WRITE;
    }

  /* Sets to a subreg of a single word register are partial sets if
     they are wrapped in a strict lowpart, and not partial otherwise.  */
  if (GET_CODE (dst) == SUBREG && REG_P (SUBREG_REG (dst))
      && dst_in_strict_lowpart)
    flags |= DF_REF_PARTIAL;

  if (REG_P (dst)
      || (GET_CODE (dst) == SUBREG && REG_P (SUBREG_REG (dst))))
    df_ref_record (dflow, dst, loc, bb, insn,
		   DF_REF_REG_DEF, flags, record_live);
}


/* Process all the registers defined in the pattern rtx, X.  */

static void
df_defs_record (struct dataflow *dflow, rtx x, basic_block bb, rtx insn)
{
  RTX_CODE code = GET_CODE (x);

  if (code == SET || code == CLOBBER)
    {
      /* Mark the single def within the pattern.  */
      df_def_record_1 (dflow, x, bb, insn,
		       code == CLOBBER ? DF_REF_MUST_CLOBBER : 0, true);
    }
  else if (code == COND_EXEC)
    {
      df_defs_record (dflow, COND_EXEC_CODE (x), bb, insn);
    }
  else if (code == PARALLEL)
    {
      int i;

      /* Mark the multiple defs within the pattern.  */
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	df_defs_record (dflow, XVECEXP (x, 0, i), bb, insn);
    }
}


/* Process all the registers used in the rtx at address LOC.  */

static void
df_uses_record (struct dataflow *dflow, rtx *loc, enum df_ref_type ref_type,
		basic_block bb, rtx insn, enum df_ref_flags flags)
{
  RTX_CODE code;
  rtx x;
 retry:
  x = *loc;
  if (!x)
    return;
  code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_INT:
    case CONST:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case PC:
    case CC0:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any registers inside the address
	 as being used.  */
      if (MEM_P (XEXP (x, 0)))
	df_uses_record (dflow, &XEXP (XEXP (x, 0), 0),
			DF_REF_REG_MEM_STORE, bb, insn, flags);

      /* If we're clobbering a REG then we have a def so ignore.  */
      return;

    case MEM:
      df_uses_record (dflow, &XEXP (x, 0), DF_REF_REG_MEM_LOAD, bb, insn,
		      flags & DF_REF_IN_NOTE);
      return;

    case SUBREG:
      /* While we're here, optimize this case.  */
      flags |= DF_REF_PARTIAL;
      /* In case the SUBREG is not of a REG, do not optimize.  */
      if (!REG_P (SUBREG_REG (x)))
	{
	  loc = &SUBREG_REG (x);
	  df_uses_record (dflow, loc, ref_type, bb, insn, flags);
	  return;
	}
      /* ... Fall through ...  */

    case REG:
      df_ref_record (dflow, x, loc, bb, insn, ref_type, flags, true);
      return;

    case SET:
      {
	rtx dst = SET_DEST (x);
	gcc_assert (!(flags & DF_REF_IN_NOTE));
	df_uses_record (dflow, &SET_SRC (x), DF_REF_REG_USE, bb, insn, flags);

	switch (GET_CODE (dst))
	  {
	    case SUBREG:
	      if (df_read_modify_subreg_p (dst))
		{
		  df_uses_record (dflow, &SUBREG_REG (dst),
				  DF_REF_REG_USE, bb,
				  insn, flags | DF_REF_READ_WRITE);
		  break;
		}
	      /* Fall through.  */
	    case REG:
	    case PARALLEL:
	    case SCRATCH:
	    case PC:
	    case CC0:
	      break;
	    case MEM:
	      df_uses_record (dflow, &XEXP (dst, 0),
			      DF_REF_REG_MEM_STORE,
			      bb, insn, flags);
	      break;
	    case STRICT_LOW_PART:
	      {
		rtx *temp = &XEXP (dst, 0);
		/* A strict_low_part uses the whole REG and not just the
		   SUBREG.  */
		dst = XEXP (dst, 0);
		df_uses_record (dflow,
				(GET_CODE (dst) == SUBREG)
				? &SUBREG_REG (dst) : temp,
				DF_REF_REG_USE, bb,
				insn, DF_REF_READ_WRITE);
	      }
	      break;
	    case ZERO_EXTRACT:
	    case SIGN_EXTRACT:
	      df_uses_record (dflow, &XEXP (dst, 0),
			      DF_REF_REG_USE, bb, insn,
			      DF_REF_READ_WRITE);
	      df_uses_record (dflow, &XEXP (dst, 1),
			      DF_REF_REG_USE, bb, insn, flags);
	      df_uses_record (dflow, &XEXP (dst, 2),
			      DF_REF_REG_USE, bb, insn, flags);
	      dst = XEXP (dst, 0);
	      break;
	    default:
	      gcc_unreachable ();
	  }
	return;
      }

    case RETURN:
      break;

    case ASM_OPERANDS:
    case UNSPEC_VOLATILE:
    case TRAP_IF:
    case ASM_INPUT:
      {
	/* Traditional and volatile asm instructions must be
	   considered to use and clobber all hard registers, all
	   pseudo-registers and all of memory.  So must TRAP_IF and
	   UNSPEC_VOLATILE operations.

	   Consider for instance a volatile asm that changes the fpu
	   rounding mode.  An insn should not be moved across this
	   even if it only uses pseudo-regs because it might give an
	   incorrectly rounded result.

	   However, flow.c's liveness computation did *not* do this,
	   giving the reasoning as " ?!? Unfortunately, marking all
	   hard registers as live causes massive problems for the
	   register allocator and marking all pseudos as live creates
	   mountains of uninitialized variable warnings."

	   In order to maintain the status quo with regard to liveness
	   and uses, we do what flow.c did and just mark any regs we
	   can find in ASM_OPERANDS as used.  Later on, when liveness
	   is computed, asm insns are scanned and regs_asm_clobbered
	   is filled out.

	   For all ASM_OPERANDS, we must traverse the vector of input
	   operands.  We cannot just fall through here since then we
	   would be confused by the ASM_INPUT rtx inside ASM_OPERANDS,
	   which do not indicate traditional asms unlike their normal
	   usage.  */
	if (code == ASM_OPERANDS)
	  {
	    int j;

	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	      df_uses_record (dflow, &ASM_OPERANDS_INPUT (x, j),
			      DF_REF_REG_USE, bb, insn, flags);
	    return;
	  }
	break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* Catch the def of the register being modified.  */
      flags |= DF_REF_READ_WRITE;
      df_ref_record (dflow, XEXP (x, 0), &XEXP (x, 0), bb, insn,
		     DF_REF_REG_DEF, flags, true);

      /* ... Fall through to handle uses ...  */

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */
  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    /* Tail recursive case: save a function call level.  */
	    if (i == 0)
	      {
		loc = &XEXP (x, 0);
		goto retry;
	      }
	    df_uses_record (dflow, &XEXP (x, i), ref_type, bb, insn, flags);
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      df_uses_record (dflow, &XVECEXP (x, i, j), ref_type,
			      bb, insn, flags);
	  }
      }
  }
}

/* Return true if *LOC contains an asm.  */

static int
df_insn_contains_asm_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  if (!*loc)
    return 0;
  if (GET_CODE (*loc) == ASM_OPERANDS)
    return 1;
  return 0;
}


/* Return true if INSN contains an ASM.  */

static int
df_insn_contains_asm (rtx insn)
{
  return for_each_rtx (&insn, df_insn_contains_asm_1, NULL);
}



/* Record all the refs for DF within INSN of basic block BB.  */

static void
df_insn_refs_record (struct dataflow *dflow, basic_block bb, rtx insn)
{
  struct df *df = dflow->df;
  int i;

  if (INSN_P (insn))
    {
      rtx note;

      if (df_insn_contains_asm (insn))
	DF_INSN_CONTAINS_ASM (df, insn) = true;

      /* Record register defs.  */
      df_defs_record (dflow, PATTERN (insn), bb, insn);

      if (dflow->flags & DF_EQUIV_NOTES)
	for (note = REG_NOTES (insn); note;
	     note = XEXP (note, 1))
	  {
	    switch (REG_NOTE_KIND (note))
	      {
	      case REG_EQUIV:
	      case REG_EQUAL:
		df_uses_record (dflow, &XEXP (note, 0), DF_REF_REG_USE,
				bb, insn, DF_REF_IN_NOTE);
	      default:
		break;
	      }
	  }

      if (CALL_P (insn))
	{
	  rtx note;

	  /* Record the registers used to pass arguments, and explicitly
	     noted as clobbered.  */
	  for (note = CALL_INSN_FUNCTION_USAGE (insn); note;
	       note = XEXP (note, 1))
	    {
	      if (GET_CODE (XEXP (note, 0)) == USE)
		df_uses_record (dflow, &XEXP (XEXP (note, 0), 0),
				DF_REF_REG_USE,
				bb, insn, 0);
	      else if (GET_CODE (XEXP (note, 0)) == CLOBBER)
		{
		  df_defs_record (dflow, XEXP (note, 0), bb, insn);
		  if (REG_P (XEXP (XEXP (note, 0), 0)))
		    {
		      rtx reg = XEXP (XEXP (note, 0), 0);
		      int regno_last;
		      int regno_first;
		      int i;

		      regno_last = regno_first = REGNO (reg);
		      if (regno_first < FIRST_PSEUDO_REGISTER)
			regno_last
			  += hard_regno_nregs[regno_first][GET_MODE (reg)] - 1;
		      for (i = regno_first; i <= regno_last; i++)
			regs_ever_live[i] = 1;
		    }
		}
	    }

	  /* The stack ptr is used (honorarily) by a CALL insn.  */
	  df_uses_record (dflow, &regno_reg_rtx[STACK_POINTER_REGNUM],
			  DF_REF_REG_USE, bb, insn,
			  0);

	  if (dflow->flags & DF_HARD_REGS)
	    {
	      bitmap_iterator bi;
	      unsigned int ui;
	      /* Calls may also reference any of the global registers,
		 so they are recorded as used.  */
	      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
		if (global_regs[i])
		  df_uses_record (dflow, &regno_reg_rtx[i],
				  DF_REF_REG_USE, bb, insn,
				  0);
	      EXECUTE_IF_SET_IN_BITMAP (df_invalidated_by_call, 0, ui, bi)
		df_ref_record (dflow, regno_reg_rtx[ui], &regno_reg_rtx[ui], bb,
			       insn, DF_REF_REG_DEF, DF_REF_MAY_CLOBBER, false);
	    }
	}

      /* Record the register uses.  */
      df_uses_record (dflow, &PATTERN (insn),
		      DF_REF_REG_USE, bb, insn, 0);
    }
}

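/* Return true if basic block BB has at least one incoming EH edge.  */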
static bool
df_has_eh_preds (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->flags & EDGE_EH)
	return true;
    }
  return false;
}

/* Record all the refs within the basic block BB.  */

static void
df_bb_refs_record (struct dataflow *dflow, basic_block bb)
{
  struct df *df = dflow->df;
  rtx insn;
  int luid = 0;
  struct df_scan_bb_info *bb_info = df_scan_get_bb_info (dflow, bb->index);
  bitmap artificial_uses_at_bottom = NULL;

  if (dflow->flags & DF_HARD_REGS)
    artificial_uses_at_bottom = BITMAP_ALLOC (NULL);

  /* Need to make sure that there is a record in the basic block info.  */
  if (!bb_info)
    {
      bb_info = (struct df_scan_bb_info *) pool_alloc (dflow->block_pool);
      df_scan_set_bb_info (dflow, bb->index, bb_info);
      bb_info->artificial_defs = NULL;
      bb_info->artificial_uses = NULL;
    }

  /* Scan the block an insn at a time from beginning to end.  */
  FOR_BB_INSNS (bb, insn)
    {
      df_insn_create_insn_record (dflow, insn);
      if (INSN_P (insn))
	{
	  /* Record defs within INSN.  */
	  DF_INSN_LUID (df, insn) = luid++;
	  df_insn_refs_record (dflow, bb, insn);
	}
      DF_INSN_LUID (df, insn) = luid;
    }

#ifdef EH_RETURN_DATA_REGNO
  if ((dflow->flags & DF_HARD_REGS)
      && df_has_eh_preds (bb))
    {
      unsigned int i;
      /* Mark the registers that will contain data for the handler.  */
      for (i = 0; ; ++i)
	{
	  unsigned regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;
	  df_ref_record (dflow, regno_reg_rtx[regno], &regno_reg_rtx[regno],
			 bb, NULL,
			 DF_REF_REG_DEF, DF_REF_ARTIFICIAL | DF_REF_AT_TOP,
			 false);
	}
    }
#endif


  if ((dflow->flags & DF_HARD_REGS)
      && df_has_eh_preds (bb))
    {
#ifdef EH_USES
      unsigned int i;
      /* This code is putting in an artificial ref for the use at the
	 TOP of the block that receives the exception.  It is too
	 cumbersome to actually put the ref on the edge.  We could
	 either model this at the top of the receiver block or the
	 bottom of the sender block.

	 The bottom of the sender block is problematic because not all
	 out-edges of a block are eh-edges.  However, it is true that
	 all edges into a block are either eh-edges or none of them
	 are eh-edges.  Thus, we can model this at the top of the
	 eh-receiver for all of the edges at once.  */
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (EH_USES (i))
	  df_uses_record (dflow, &regno_reg_rtx[i],
			  DF_REF_REG_USE, bb, NULL,
			  DF_REF_ARTIFICIAL | DF_REF_AT_TOP);
#endif

      /* The following code (down through the arg_pointer setting)
	 APPEARS to be necessary because there is nothing that
	 actually describes what the exception handling code may
	 actually need to keep alive.  */
      if (reload_completed)
	{
	  if (frame_pointer_needed)
	    {
	      bitmap_set_bit (artificial_uses_at_bottom, FRAME_POINTER_REGNUM);
#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
	      bitmap_set_bit (artificial_uses_at_bottom,
			      HARD_FRAME_POINTER_REGNUM);
#endif
	    }
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	  if (fixed_regs[ARG_POINTER_REGNUM])
	    bitmap_set_bit (artificial_uses_at_bottom, ARG_POINTER_REGNUM);
#endif
	}
    }

  if ((dflow->flags & DF_HARD_REGS)
      && bb->index >= NUM_FIXED_BLOCKS)
    {
      /* Before reload, there are a few registers that must be forced
	 live everywhere -- which might not already be the case for
	 blocks within infinite loops.  */
      if (!reload_completed)
	{
	  /* Any reference to any pseudo before reload is a potential
	     reference of the frame pointer.  */
	  bitmap_set_bit (artificial_uses_at_bottom, FRAME_POINTER_REGNUM);

#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	  /* Pseudos with argument area equivalences may require
	     reloading via the argument pointer.  */
	  if (fixed_regs[ARG_POINTER_REGNUM])
	    bitmap_set_bit (artificial_uses_at_bottom, ARG_POINTER_REGNUM);
#endif

	  /* Any constant, or pseudo with constant equivalences, may
	     require reloading from memory using the pic register.  */
	  if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
	      && fixed_regs[PIC_OFFSET_TABLE_REGNUM])
	    bitmap_set_bit (artificial_uses_at_bottom,
			    PIC_OFFSET_TABLE_REGNUM);
	}
      /* The all-important stack pointer must always be live.  */
      bitmap_set_bit (artificial_uses_at_bottom, STACK_POINTER_REGNUM);
    }

  if (dflow->flags & DF_HARD_REGS)
    {
      bitmap_iterator bi;
      unsigned int regno;

      EXECUTE_IF_SET_IN_BITMAP (artificial_uses_at_bottom, 0, regno, bi)
	{
	  df_uses_record (dflow, &regno_reg_rtx[regno],
			  DF_REF_REG_USE, bb, NULL, DF_REF_ARTIFICIAL);
	}

      BITMAP_FREE (artificial_uses_at_bottom);
    }
}

/* Records the implicit definitions at targets of nonlocal gotos in BLOCKS.  */

static void
record_nonlocal_goto_receiver_defs (struct dataflow *dflow, bitmap blocks)
{
  rtx x;
  basic_block bb;

  /* See expand_builtin_setjmp_receiver; hard_frame_pointer_rtx is used in
     the nonlocal goto receiver, and needs to be considered defined
     implicitly.  */
  if (!(dflow->flags & DF_HARD_REGS))
    return;

  for (x = nonlocal_goto_handler_labels; x; x = XEXP (x, 1))
    {
      bb = BLOCK_FOR_INSN (XEXP (x, 0));
      if (!bitmap_bit_p (blocks, bb->index))
	continue;

      df_ref_record (dflow, hard_frame_pointer_rtx, &hard_frame_pointer_rtx,
		     bb, NULL,
		     DF_REF_REG_DEF, DF_REF_ARTIFICIAL | DF_REF_AT_TOP,
		     false);
    }
}

/* Record all the refs in the basic blocks specified by BLOCKS.  */

static void
df_refs_record (struct dataflow *dflow, bitmap blocks)
{
  unsigned int bb_index;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
    {
      basic_block bb = BASIC_BLOCK (bb_index);
      df_bb_refs_record (dflow, bb);
    }

  if (bitmap_bit_p (blocks, EXIT_BLOCK))
    df_record_exit_block_uses (dflow);

  if (bitmap_bit_p (blocks, ENTRY_BLOCK))
    df_record_entry_block_defs (dflow);

  if (current_function_has_nonlocal_label)
    record_nonlocal_goto_receiver_defs (dflow, blocks);
}


/*----------------------------------------------------------------------------
   Specialized hard register scanning functions.
----------------------------------------------------------------------------*/

/* Mark a register in SET.  Hard registers in large modes get all
   of their component registers set as well.  */

static void
df_mark_reg (rtx reg, void *vset)
{
  bitmap set = (bitmap) vset;
  int regno = REGNO (reg);

  gcc_assert (GET_MODE (reg) != BLKmode);

  bitmap_set_bit (set, regno);
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int n = hard_regno_nregs[regno][GET_MODE (reg)];
      while (--n > 0)
	bitmap_set_bit (set, regno + n);
    }
}

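/* For example, marking (reg:DI 0) on a (hypothetical) 32-bit target
   where DImode occupies two hard registers sets bits 0 and 1 in SET;
   marking a pseudo sets only its own register number.  */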
fcf2ad9f 1844
1845/* Record the (conservative) set of hard registers that are defined on
1846 entry to the function. */
1847
1848static void
3e6933a8 1849df_record_entry_block_defs (struct dataflow *dflow)
fcf2ad9f 1850{
1851 unsigned int i;
1852 bitmap_iterator bi;
1853 rtx r;
3e6933a8 1854 struct df *df = dflow->df;
fcf2ad9f 1855
1856 bitmap_clear (df->entry_block_defs);
1857
3e6933a8 1858 if (!(dflow->flags & DF_HARD_REGS))
fcf2ad9f 1859 return;
1860
1861 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1862 {
1863 if (FUNCTION_ARG_REGNO_P (i))
1864#ifdef INCOMING_REGNO
1865 bitmap_set_bit (df->entry_block_defs, INCOMING_REGNO (i));
1866#else
1867 bitmap_set_bit (df->entry_block_defs, i);
1868#endif
1869 }
1870
1871 /* Once the prologue has been generated, all of these registers
1872 should just show up in the first regular block. */
1873 if (HAVE_prologue && epilogue_completed)
1874 {
1875 /* Defs for the callee saved registers are inserted so that the
1876 pushes have some defining location. */
1877 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
 1878 if (!call_used_regs[i] && regs_ever_live[i])
1879 bitmap_set_bit (df->entry_block_defs, i);
1880 }
1881 else
1882 {
3e6933a8 1883 /* The stack pointer is always defined on entry. */
1884 bitmap_set_bit (df->entry_block_defs, STACK_POINTER_REGNUM);
1885
abecc17d 1886#ifdef INCOMING_RETURN_ADDR_RTX
fcf2ad9f 1887 if (REG_P (INCOMING_RETURN_ADDR_RTX))
1888 bitmap_set_bit (df->entry_block_defs, REGNO (INCOMING_RETURN_ADDR_RTX));
abecc17d 1889#endif
fcf2ad9f 1890
1891 /* If STATIC_CHAIN_INCOMING_REGNUM == STATIC_CHAIN_REGNUM
1892 only STATIC_CHAIN_REGNUM is defined. If they are different,
1893 we only care about the STATIC_CHAIN_INCOMING_REGNUM. */
1894#ifdef STATIC_CHAIN_INCOMING_REGNUM
1895 bitmap_set_bit (df->entry_block_defs, STATIC_CHAIN_INCOMING_REGNUM);
1896#else
1897#ifdef STATIC_CHAIN_REGNUM
1898 bitmap_set_bit (df->entry_block_defs, STATIC_CHAIN_REGNUM);
1899#endif
1900#endif
1901
1902 r = TARGET_STRUCT_VALUE_RTX (current_function_decl, true);
1903 if (r && REG_P (r))
1904 bitmap_set_bit (df->entry_block_defs, REGNO (r));
1905 }
1906
3e6933a8 1907 if ((!reload_completed) || frame_pointer_needed)
fcf2ad9f 1908 {
1909 /* Any reference to any pseudo before reload is a potential
1910 reference of the frame pointer. */
1911 bitmap_set_bit (df->entry_block_defs, FRAME_POINTER_REGNUM);
3e6933a8 1912#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
1913 /* If they are different, also mark the hard frame pointer as live. */
1914 if (!LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM))
1915 bitmap_set_bit (df->entry_block_defs, HARD_FRAME_POINTER_REGNUM);
1916#endif
1917 }
fcf2ad9f 1918
3e6933a8 1919 /* These registers are live everywhere. */
1920 if (!reload_completed)
1921 {
fcf2ad9f 1922#ifdef EH_USES
 1923 /* The IA-64, the only machine that uses this, does not define these
1924 until after reload. */
1925 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1926 if (EH_USES (i))
1927 {
1928 bitmap_set_bit (df->entry_block_defs, i);
1929 }
1930#endif
1931
1932#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1933 /* Pseudos with argument area equivalences may require
1934 reloading via the argument pointer. */
1935 if (fixed_regs[ARG_POINTER_REGNUM])
1936 bitmap_set_bit (df->entry_block_defs, ARG_POINTER_REGNUM);
1937#endif
1938
1939#ifdef PIC_OFFSET_TABLE_REGNUM
1940 /* Any constant, or pseudo with constant equivalences, may
1941 require reloading from memory using the pic register. */
1942 if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
1943 && fixed_regs[PIC_OFFSET_TABLE_REGNUM])
1944 bitmap_set_bit (df->entry_block_defs, PIC_OFFSET_TABLE_REGNUM);
1945#endif
1946 }
1947
1c1a6437 1948 targetm.live_on_entry (df->entry_block_defs);
fcf2ad9f 1949
1950 EXECUTE_IF_SET_IN_BITMAP (df->entry_block_defs, 0, i, bi)
1951 {
1952 df_ref_record (dflow, regno_reg_rtx[i], &regno_reg_rtx[i],
1953 ENTRY_BLOCK_PTR, NULL,
 1954 DF_REF_REG_DEF, DF_REF_ARTIFICIAL, false);
1955 }
1956}
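
/* A hypothetical target fragment, guarded out of the build, showing
   why the FUNCTION_ARG_REGNO_P loop above prefers INCOMING_REGNO when
   it is defined: on a register-window machine in the style of SPARC,
   an outgoing argument register (say regnos 8-13) shows up in a
   different register on entry, here 16 higher.  The numbers are
   illustrative only.  */
#if 0
#define INCOMING_REGNO(OUT) \
  (((OUT) >= 8 && (OUT) <= 13) ? (OUT) + 16 : (OUT))
#endif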
1957
1958
e011eba9 1959/* Record the set of hard registers that are used in the exit block. */
1960
1961static void
1962df_record_exit_block_uses (struct dataflow *dflow)
1963{
1964 unsigned int i;
1965 bitmap_iterator bi;
1966 struct df *df = dflow->df;
1967
1968 bitmap_clear (df->exit_block_uses);
1969
3e6933a8 1970 if (!(dflow->flags & DF_HARD_REGS))
e011eba9 1971 return;
1972
1973 /* If exiting needs the right stack value, consider the stack
1974 pointer live at the end of the function. */
1975 if ((HAVE_epilogue && epilogue_completed)
3e6933a8 1976 || !EXIT_IGNORE_STACK
1977 || (!FRAME_POINTER_REQUIRED
1978 && !current_function_calls_alloca
e011eba9 1979 && flag_omit_frame_pointer)
1980 || current_function_sp_is_unchanging)
1981 {
1982 bitmap_set_bit (df->exit_block_uses, STACK_POINTER_REGNUM);
1983 }
1984
1985 /* Mark the frame pointer if needed at the end of the function.
1986 If we end up eliminating it, it will be removed from the live
1987 list of each basic block by reload. */
1988
3e6933a8 1989 if ((!reload_completed) || frame_pointer_needed)
e011eba9 1990 {
1991 bitmap_set_bit (df->exit_block_uses, FRAME_POINTER_REGNUM);
1992#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
1993 /* If they are different, also mark the hard frame pointer as live. */
3e6933a8 1994 if (!LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM))
e011eba9 1995 bitmap_set_bit (df->exit_block_uses, HARD_FRAME_POINTER_REGNUM);
1996#endif
1997 }
1998
1999#ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
2000 /* Many architectures have a GP register even without flag_pic.
2001 Assume the pic register is not in use, or will be handled by
2002 other means, if it is not fixed. */
2003 if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
2004 && fixed_regs[PIC_OFFSET_TABLE_REGNUM])
2005 bitmap_set_bit (df->exit_block_uses, PIC_OFFSET_TABLE_REGNUM);
2006#endif
2007
2008 /* Mark all global registers, and all registers used by the
2009 epilogue as being live at the end of the function since they
2010 may be referenced by our caller. */
2011 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2012 if (global_regs[i] || EPILOGUE_USES (i))
2013 bitmap_set_bit (df->exit_block_uses, i);
2014
2015 if (HAVE_epilogue && epilogue_completed)
2016 {
2017 /* Mark all call-saved registers that we actually used. */
2018 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3e6933a8 2019 if (regs_ever_live[i] && !LOCAL_REGNO (i)
2020 && !TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
e011eba9 2021 bitmap_set_bit (df->exit_block_uses, i);
2022 }
2023
2024#ifdef EH_RETURN_DATA_REGNO
2025 /* Mark the registers that will contain data for the handler. */
2026 if (reload_completed && current_function_calls_eh_return)
2027 for (i = 0; ; ++i)
2028 {
2029 unsigned regno = EH_RETURN_DATA_REGNO (i);
2030 if (regno == INVALID_REGNUM)
2031 break;
2032 bitmap_set_bit (df->exit_block_uses, regno);
2033 }
2034#endif
2035
2036#ifdef EH_RETURN_STACKADJ_RTX
3e6933a8 2037 if ((!HAVE_epilogue || !epilogue_completed)
e011eba9 2038 && current_function_calls_eh_return)
2039 {
2040 rtx tmp = EH_RETURN_STACKADJ_RTX;
2041 if (tmp && REG_P (tmp))
2042 df_mark_reg (tmp, df->exit_block_uses);
2043 }
2044#endif
2045
2046#ifdef EH_RETURN_HANDLER_RTX
3e6933a8 2047 if ((!HAVE_epilogue || !epilogue_completed)
e011eba9 2048 && current_function_calls_eh_return)
2049 {
2050 rtx tmp = EH_RETURN_HANDLER_RTX;
2051 if (tmp && REG_P (tmp))
2052 df_mark_reg (tmp, df->exit_block_uses);
2053 }
2054#endif
2055
2056 /* Mark function return value. */
 2057 diddle_return_value (df_mark_reg, (void *) df->exit_block_uses);
2058
3e6933a8 2059 if (dflow->flags & DF_HARD_REGS)
e011eba9 2060 EXECUTE_IF_SET_IN_BITMAP (df->exit_block_uses, 0, i, bi)
2061 df_uses_record (dflow, &regno_reg_rtx[i],
2062 DF_REF_REG_USE, EXIT_BLOCK_PTR, NULL,
2063 DF_REF_ARTIFICIAL);
2064}
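
/* A hypothetical target fragment, guarded out of the build: a port
   whose epilogue consumes a link register would define EPILOGUE_USES
   so that the global_regs/EPILOGUE_USES loop above keeps that
   register live at function exit.  Register 14 here is an assumed
   link-register number, not taken from any particular port.  */
#if 0
#define EPILOGUE_USES(REGNO) ((REGNO) == 14)
#endif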
2065
2066static bool initialized = false;
2067
2068/* Initialize some platform specific structures. */
2069
2070void
2071df_hard_reg_init (void)
2072{
e011eba9 2073 int i;
bebf8106 2074#ifdef ELIMINABLE_REGS
e011eba9 2075 static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
2076#endif
2077 /* After reload, some ports add certain bits to regs_ever_live so
2078 this cannot be reset. */
2079
2080 if (!reload_completed)
2081 memset (regs_ever_live, 0, sizeof (regs_ever_live));
2082
2083 if (initialized)
2084 return;
2085
2086 bitmap_obstack_initialize (&persistent_obstack);
2087
2088 /* Record which registers will be eliminated. We use this in
2089 mark_used_regs. */
2090 CLEAR_HARD_REG_SET (elim_reg_set);
2091
2092#ifdef ELIMINABLE_REGS
2093 for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++)
2094 SET_HARD_REG_BIT (elim_reg_set, eliminables[i].from);
2095#else
2096 SET_HARD_REG_BIT (elim_reg_set, FRAME_POINTER_REGNUM);
2097#endif
2098
2099 df_invalidated_by_call = BITMAP_ALLOC (&persistent_obstack);
2100
2101 /* Inconveniently, this is only readily available in hard reg set
2102 form. */
2103 for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i)
2104 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
2105 bitmap_set_bit (df_invalidated_by_call, i);
2106
e011eba9 2107 initialized = true;
2108}
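
/* A hypothetical target fragment, guarded out of the build: a typical
   ELIMINABLE_REGS definition lists the from/to register pairs the
   target may eliminate.  df_hard_reg_init only collects the `from'
   side of each pair into elim_reg_set.  The pairs below are a common
   shape, not any specific port's definition.  */
#if 0
#define ELIMINABLE_REGS					\
{ { ARG_POINTER_REGNUM,   STACK_POINTER_REGNUM },	\
  { ARG_POINTER_REGNUM,   HARD_FRAME_POINTER_REGNUM },	\
  { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM },	\
  { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM } }
#endif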