e011eba9 1/* Scanning of rtl for dataflow analysis.
d353bf18 2 Copyright (C) 1999-2015 Free Software Foundation, Inc.
48e1416a 3 Originally contributed by Michael P. Hayes
e011eba9 4 (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
5 Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
6 and Kenneth Zadeck (zadeck@naturalbridge.com).
7
8This file is part of GCC.
9
10GCC is free software; you can redistribute it and/or modify it under
11the terms of the GNU General Public License as published by the Free
8c4c00c1 12Software Foundation; either version 3, or (at your option) any later
e011eba9 13version.
14
15GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16WARRANTY; without even the implied warranty of MERCHANTABILITY or
17FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18for more details.
19
20You should have received a copy of the GNU General Public License
8c4c00c1 21along with GCC; see the file COPYING3. If not see
22<http://www.gnu.org/licenses/>. */
e011eba9 23
24#include "config.h"
25#include "system.h"
26#include "coretypes.h"
27#include "tm.h"
28#include "rtl.h"
29#include "tm_p.h"
30#include "insn-config.h"
31#include "recog.h"
a3020f2f 32#include "hashtab.h"
33#include "hash-set.h"
a3020f2f 34#include "machmode.h"
b20a8bb4 35#include "vec.h"
36#include "double-int.h"
37#include "input.h"
38#include "alias.h"
39#include "symtab.h"
40#include "wide-int.h"
41#include "inchash.h"
a3020f2f 42#include "hard-reg-set.h"
43#include "input.h"
e011eba9 44#include "function.h"
45#include "regs.h"
e011eba9 46#include "alloc-pool.h"
47#include "flags.h"
94ea8568 48#include "predict.h"
49#include "dominance.h"
50#include "cfg.h"
e011eba9 51#include "basic-block.h"
52#include "sbitmap.h"
53#include "bitmap.h"
b9ed1410 54#include "dumpfile.h"
fcf2ad9f 55#include "tree.h"
56#include "target.h"
57#include "target-def.h"
e011eba9 58#include "df.h"
06f9d6ef 59#include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
e011eba9 60
f7f05a07 61
62typedef struct df_mw_hardreg *df_mw_hardreg_ptr;
63
f7f05a07 64
e011eba9 65#ifndef HAVE_prologue
66#define HAVE_prologue 0
67#endif
68#ifndef HAVE_sibcall_epilogue
69#define HAVE_sibcall_epilogue 0
70#endif
71
e011eba9 72/* The set of hard registers in eliminables[i].from. */
73
74static HARD_REG_SET elim_reg_set;
75
e011eba9 76/* Initialize ur_in and ur_out as if all hard registers were partially
77 available. */
78
3072d30e 79struct df_collection_rec
80{
4997014d 81 auto_vec<df_ref, 128> def_vec;
82 auto_vec<df_ref, 32> use_vec;
83 auto_vec<df_ref, 32> eq_use_vec;
84 auto_vec<df_mw_hardreg_ptr, 32> mw_vec;
3072d30e 85};
86
ed6e85ae 87static void df_ref_record (enum df_ref_class, struct df_collection_rec *,
48e1416a 88 rtx, rtx *,
158b6cc9 89 basic_block, struct df_insn_info *,
c989ecc1 90 enum df_ref_type, int ref_flags);
f7583c76 91static void df_def_record_1 (struct df_collection_rec *, rtx *,
158b6cc9 92 basic_block, struct df_insn_info *,
b9c74b4d 93 int ref_flags);
158b6cc9 94static void df_defs_record (struct df_collection_rec *, rtx,
95 basic_block, struct df_insn_info *,
b9c74b4d 96 int ref_flags);
c989ecc1 97static void df_uses_record (struct df_collection_rec *,
3072d30e 98 rtx *, enum df_ref_type,
158b6cc9 99 basic_block, struct df_insn_info *,
c989ecc1 100 int ref_flags);
e011eba9 101
4ffe0526 102static void df_install_ref_incremental (df_ref);
48e1416a 103static void df_insn_refs_collect (struct df_collection_rec*,
104 basic_block, struct df_insn_info *);
3072d30e 105static void df_canonize_collection_rec (struct df_collection_rec *);
106
107static void df_get_regular_block_artificial_uses (bitmap);
108static void df_get_eh_block_artificial_uses (bitmap);
109
110static void df_record_entry_block_defs (bitmap);
111static void df_record_exit_block_uses (bitmap);
112static void df_get_exit_block_use_set (bitmap);
113static void df_get_entry_block_def_set (bitmap);
e011eba9 114static void df_grow_ref_info (struct df_ref_info *, unsigned int);
ddc2d0e3 115static void df_ref_chain_delete_du_chain (df_ref);
116static void df_ref_chain_delete (df_ref);
e011eba9 117
48e1416a 118static void df_refs_add_to_chains (struct df_collection_rec *,
e149ca56 119 basic_block, rtx_insn *, unsigned int);
3072d30e 120
e149ca56 121static bool df_insn_refs_verify (struct df_collection_rec *, basic_block,
122 rtx_insn *, bool);
3072d30e 123static void df_entry_block_defs_collect (struct df_collection_rec *, bitmap);
124static void df_exit_block_uses_collect (struct df_collection_rec *, bitmap);
48e1416a 125static void df_install_ref (df_ref, struct df_reg_info *,
3072d30e 126 struct df_ref_info *, bool);
127
ddc2d0e3 128static int df_ref_compare (df_ref, df_ref);
129static int df_ref_ptr_compare (const void *, const void *);
130static int df_mw_compare (const df_mw_hardreg *, const df_mw_hardreg *);
131static int df_mw_ptr_compare (const void *, const void *);
3072d30e 132
b983ea33 133static void df_insn_info_delete (unsigned int);
134
3072d30e 135/* Indexed by hardware reg number, is true if that register is ever
136 used in the current function.
137
138 In df-scan.c, this is set up to record the hard regs used
139 explicitly. Reload adds in the hard regs used for holding pseudo
140 regs. Final uses it to generate the code in the function prologue
141 and epilogue to save and restore registers as needed. */
142
143static bool regs_ever_live[FIRST_PSEUDO_REGISTER];
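/* Illustrative sketch only (not part of the scanner): other passes are
   expected to go through the accessors defined later in this file
   rather than touching the array directly, e.g.

       if (df_regs_ever_live_p (HARD_FRAME_POINTER_REGNUM))
         ... the prologue must save and restore that register ...

       df_set_regs_ever_live (regno, true);

   The setter also arranges for the entry and exit block defs/uses to
   be recomputed (see df->redo_entry_and_exit below).  */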
d70aebca 144
145/* Flags used to tell df_refs_add_to_chains() which vectors it should copy. */
146static const unsigned int copy_defs = 0x1;
147static const unsigned int copy_uses = 0x2;
148static const unsigned int copy_eq_uses = 0x4;
149static const unsigned int copy_mw = 0x8;
150static const unsigned int copy_all = copy_defs | copy_uses | copy_eq_uses
151| copy_mw;
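/* For example, a hypothetical caller that only needed the defs and
   uses copied would pass

       df_refs_add_to_chains (&collection_rec, bb, insn,
                              copy_defs | copy_uses);

   df_insn_rescan below simply passes copy_all.  */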
e011eba9 152\f
153/*----------------------------------------------------------------------------
154 SCANNING DATAFLOW PROBLEM
155
156 There are several ways in which scanning looks just like the other
 157 dataflow problems. It shares all the mechanisms for local info
158 as well as basic block info. Where it differs is when and how often
159 it gets run. It also has no need for the iterative solver.
160----------------------------------------------------------------------------*/
161
162/* Problem data for the scanning dataflow function. */
163struct df_scan_problem_data
164{
ed6e85ae 165 alloc_pool ref_base_pool;
166 alloc_pool ref_artificial_pool;
167 alloc_pool ref_regular_pool;
e011eba9 168 alloc_pool insn_pool;
169 alloc_pool reg_pool;
3e6933a8 170 alloc_pool mw_reg_pool;
3072d30e 171 bitmap_obstack reg_bitmaps;
172 bitmap_obstack insn_bitmaps;
e011eba9 173};
174
175typedef struct df_scan_bb_info *df_scan_bb_info_t;
176
ce299759 177
178/* Internal function to shut down the scanning problem. */
48e1416a 179static void
3072d30e 180df_scan_free_internal (void)
e011eba9 181{
3e6933a8 182 struct df_scan_problem_data *problem_data
3072d30e 183 = (struct df_scan_problem_data *) df_scan->problem_data;
e011eba9 184
e011eba9 185 free (df->def_info.refs);
3072d30e 186 free (df->def_info.begin);
187 free (df->def_info.count);
e011eba9 188 memset (&df->def_info, 0, (sizeof (struct df_ref_info)));
189
e011eba9 190 free (df->use_info.refs);
3072d30e 191 free (df->use_info.begin);
192 free (df->use_info.count);
e011eba9 193 memset (&df->use_info, 0, (sizeof (struct df_ref_info)));
194
3072d30e 195 free (df->def_regs);
196 df->def_regs = NULL;
197 free (df->use_regs);
198 df->use_regs = NULL;
199 free (df->eq_use_regs);
200 df->eq_use_regs = NULL;
201 df->regs_size = 0;
9af5ce0c 202 DF_REG_SIZE (df) = 0;
3072d30e 203
e011eba9 204 free (df->insns);
205 df->insns = NULL;
3072d30e 206 DF_INSN_SIZE () = 0;
e011eba9 207
3072d30e 208 free (df_scan->block_info);
209 df_scan->block_info = NULL;
210 df_scan->block_info_size = 0;
e011eba9 211
4b5a4301 212 bitmap_clear (&df->hardware_regs_used);
213 bitmap_clear (&df->regular_block_artificial_uses);
214 bitmap_clear (&df->eh_block_artificial_uses);
fcf2ad9f 215 BITMAP_FREE (df->entry_block_defs);
e011eba9 216 BITMAP_FREE (df->exit_block_uses);
4b5a4301 217 bitmap_clear (&df->insns_to_delete);
218 bitmap_clear (&df->insns_to_rescan);
219 bitmap_clear (&df->insns_to_notes_rescan);
e011eba9 220
ed6e85ae 221 free_alloc_pool (problem_data->ref_base_pool);
222 free_alloc_pool (problem_data->ref_artificial_pool);
223 free_alloc_pool (problem_data->ref_regular_pool);
e011eba9 224 free_alloc_pool (problem_data->insn_pool);
225 free_alloc_pool (problem_data->reg_pool);
3e6933a8 226 free_alloc_pool (problem_data->mw_reg_pool);
3072d30e 227 bitmap_obstack_release (&problem_data->reg_bitmaps);
228 bitmap_obstack_release (&problem_data->insn_bitmaps);
229 free (df_scan->problem_data);
e011eba9 230}
231
232
e011eba9 233/* Free basic block info. */
234
235static void
3072d30e 236df_scan_free_bb_info (basic_block bb, void *vbb_info)
e011eba9 237{
238 struct df_scan_bb_info *bb_info = (struct df_scan_bb_info *) vbb_info;
3072d30e 239 unsigned int bb_index = bb->index;
7a6a083c 240 rtx_insn *insn;
369ea98d 241
ddc2d0e3 242 FOR_BB_INSNS (bb, insn)
243 if (INSN_P (insn))
244 df_insn_info_delete (INSN_UID (insn));
245
246 if (bb_index < df_scan->block_info_size)
247 bb_info = df_scan_get_bb_info (bb_index);
248
249 /* Get rid of any artificial uses or defs. */
250 df_ref_chain_delete_du_chain (bb_info->artificial_defs);
251 df_ref_chain_delete_du_chain (bb_info->artificial_uses);
252 df_ref_chain_delete (bb_info->artificial_defs);
253 df_ref_chain_delete (bb_info->artificial_uses);
254 bb_info->artificial_defs = NULL;
255 bb_info->artificial_uses = NULL;
e011eba9 256}
257
258
259/* Allocate the problem data for the scanning problem. This should be
260 called when the problem is created or when the entire function is to
261 be rescanned. */
48e1416a 262void
3072d30e 263df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
e011eba9 264{
e011eba9 265 struct df_scan_problem_data *problem_data;
266 unsigned int insn_num = get_max_uid () + 1;
6940c58b 267 unsigned int block_size = 512;
3072d30e 268 basic_block bb;
e011eba9 269
270 /* Given the number of pools, this is really faster than tearing
271 everything apart. */
3072d30e 272 if (df_scan->problem_data)
273 df_scan_free_internal ();
e011eba9 274
4c36ffe6 275 problem_data = XNEW (struct df_scan_problem_data);
3072d30e 276 df_scan->problem_data = problem_data;
277 df_scan->computed = true;
e011eba9 278
48e1416a 279 problem_data->ref_base_pool
280 = create_alloc_pool ("df_scan ref base",
ed6e85ae 281 sizeof (struct df_base_ref), block_size);
48e1416a 282 problem_data->ref_artificial_pool
283 = create_alloc_pool ("df_scan ref artificial",
ed6e85ae 284 sizeof (struct df_artificial_ref), block_size);
48e1416a 285 problem_data->ref_regular_pool
286 = create_alloc_pool ("df_scan ref regular",
ed6e85ae 287 sizeof (struct df_regular_ref), block_size);
48e1416a 288 problem_data->insn_pool
289 = create_alloc_pool ("df_scan insn",
e011eba9 290 sizeof (struct df_insn_info), block_size);
48e1416a 291 problem_data->reg_pool
292 = create_alloc_pool ("df_scan reg",
e011eba9 293 sizeof (struct df_reg_info), block_size);
48e1416a 294 problem_data->mw_reg_pool
295 = create_alloc_pool ("df_scan mw_reg",
6940c58b 296 sizeof (struct df_mw_hardreg), block_size / 16);
e011eba9 297
3072d30e 298 bitmap_obstack_initialize (&problem_data->reg_bitmaps);
299 bitmap_obstack_initialize (&problem_data->insn_bitmaps);
e011eba9 300
48e1416a 301 insn_num += insn_num / 4;
3072d30e 302 df_grow_reg_info ();
e011eba9 303
3072d30e 304 df_grow_insn_info ();
305 df_grow_bb_info (df_scan);
e011eba9 306
ed7d889a 307 FOR_ALL_BB_FN (bb, cfun)
e011eba9 308 {
3072d30e 309 unsigned int bb_index = bb->index;
310 struct df_scan_bb_info *bb_info = df_scan_get_bb_info (bb_index);
e011eba9 311 bb_info->artificial_defs = NULL;
312 bb_info->artificial_uses = NULL;
313 }
314
4b5a4301 315 bitmap_initialize (&df->hardware_regs_used, &problem_data->reg_bitmaps);
316 bitmap_initialize (&df->regular_block_artificial_uses, &problem_data->reg_bitmaps);
317 bitmap_initialize (&df->eh_block_artificial_uses, &problem_data->reg_bitmaps);
3072d30e 318 df->entry_block_defs = BITMAP_ALLOC (&problem_data->reg_bitmaps);
319 df->exit_block_uses = BITMAP_ALLOC (&problem_data->reg_bitmaps);
4b5a4301 320 bitmap_initialize (&df->insns_to_delete, &problem_data->insn_bitmaps);
321 bitmap_initialize (&df->insns_to_rescan, &problem_data->insn_bitmaps);
322 bitmap_initialize (&df->insns_to_notes_rescan, &problem_data->insn_bitmaps);
deb2741b 323 df_scan->optional_p = false;
e011eba9 324}
325
326
327/* Free all of the data associated with the scan problem. */
328
48e1416a 329static void
3072d30e 330df_scan_free (void)
e011eba9 331{
3072d30e 332 if (df_scan->problem_data)
333 df_scan_free_internal ();
d0802b39 334
e011eba9 335 if (df->blocks_to_analyze)
3072d30e 336 {
337 BITMAP_FREE (df->blocks_to_analyze);
338 df->blocks_to_analyze = NULL;
339 }
e011eba9 340
3072d30e 341 free (df_scan);
e011eba9 342}
343
3072d30e 344/* Dump the preamble for DF_SCAN dump. */
48e1416a 345static void
3072d30e 346df_scan_start_dump (FILE *file ATTRIBUTE_UNUSED)
e011eba9 347{
e011eba9 348 int i;
ed6e85ae 349 int dcount = 0;
350 int ucount = 0;
351 int ecount = 0;
352 int icount = 0;
353 int ccount = 0;
354 basic_block bb;
7a6a083c 355 rtx_insn *insn;
e011eba9 356
3072d30e 357 fprintf (file, ";; invalidated by call \t");
b888113c 358 df_print_regset (file, regs_invalidated_by_call_regset);
3072d30e 359 fprintf (file, ";; hardware regs used \t");
4b5a4301 360 df_print_regset (file, &df->hardware_regs_used);
3072d30e 361 fprintf (file, ";; regular block artificial uses \t");
4b5a4301 362 df_print_regset (file, &df->regular_block_artificial_uses);
3072d30e 363 fprintf (file, ";; eh block artificial uses \t");
4b5a4301 364 df_print_regset (file, &df->eh_block_artificial_uses);
3072d30e 365 fprintf (file, ";; entry block defs \t");
366 df_print_regset (file, df->entry_block_defs);
367 fprintf (file, ";; exit block uses \t");
368 df_print_regset (file, df->exit_block_uses);
369 fprintf (file, ";; regs ever live \t");
e011eba9 370 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3072d30e 371 if (df_regs_ever_live_p (i))
372 fprintf (file, " %d[%s]", i, reg_names[i]);
ed6e85ae 373 fprintf (file, "\n;; ref usage \t");
48e1416a 374
ed6e85ae 375 for (i = 0; i < (int)df->regs_inited; i++)
376 if (DF_REG_DEF_COUNT (i) || DF_REG_USE_COUNT (i) || DF_REG_EQ_USE_COUNT (i))
377 {
378 const char * sep = "";
379
380 fprintf (file, "r%d={", i);
381 if (DF_REG_DEF_COUNT (i))
382 {
383 fprintf (file, "%dd", DF_REG_DEF_COUNT (i));
384 sep = ",";
385 dcount += DF_REG_DEF_COUNT (i);
386 }
387 if (DF_REG_USE_COUNT (i))
388 {
389 fprintf (file, "%s%du", sep, DF_REG_USE_COUNT (i));
390 sep = ",";
391 ucount += DF_REG_USE_COUNT (i);
392 }
393 if (DF_REG_EQ_USE_COUNT (i))
394 {
09669349 395 fprintf (file, "%s%de", sep, DF_REG_EQ_USE_COUNT (i));
ed6e85ae 396 ecount += DF_REG_EQ_USE_COUNT (i);
397 }
398 fprintf (file, "} ");
399 }
3072d30e 400
fc00614f 401 FOR_EACH_BB_FN (bb, cfun)
ed6e85ae 402 FOR_BB_INSNS (bb, insn)
403 if (INSN_P (insn))
404 {
405 if (CALL_P (insn))
406 ccount++;
407 else
408 icount++;
409 }
410
09669349 411 fprintf (file, "\n;; total ref usage %d{%dd,%du,%de}"
412 " in %d{%d regular + %d call} insns.\n",
413 dcount + ucount + ecount, dcount, ucount, ecount,
414 icount + ccount, icount, ccount);
e011eba9 415}
416
3072d30e 417/* Dump the bb_info for a given basic block. */
48e1416a 418static void
3072d30e 419df_scan_start_block (basic_block bb, FILE *file)
420{
421 struct df_scan_bb_info *bb_info
422 = df_scan_get_bb_info (bb->index);
423
424 if (bb_info)
425 {
426 fprintf (file, ";; bb %d artificial_defs: ", bb->index);
427 df_refs_chain_dump (bb_info->artificial_defs, true, file);
428 fprintf (file, "\n;; bb %d artificial_uses: ", bb->index);
429 df_refs_chain_dump (bb_info->artificial_uses, true, file);
430 fprintf (file, "\n");
431 }
432#if 0
433 {
7a6a083c 434 rtx_insn *insn;
3072d30e 435 FOR_BB_INSNS (bb, insn)
436 if (INSN_P (insn))
437 df_insn_debug (insn, false, file);
438 }
439#endif
440}
441
e011eba9 442static struct df_problem problem_SCAN =
443{
444 DF_SCAN, /* Problem id. */
445 DF_NONE, /* Direction. */
446 df_scan_alloc, /* Allocate the problem specific data. */
f64e6a69 447 NULL, /* Reset global information. */
e011eba9 448 df_scan_free_bb_info, /* Free basic block info. */
449 NULL, /* Local compute function. */
450 NULL, /* Init the solution specific data. */
451 NULL, /* Iterative solver. */
48e1416a 452 NULL, /* Confluence operator 0. */
453 NULL, /* Confluence operator n. */
e011eba9 454 NULL, /* Transfer function. */
455 NULL, /* Finalize function. */
456 df_scan_free, /* Free all of the problem information. */
3072d30e 457 NULL, /* Remove this problem from the stack of dataflow problems. */
458 df_scan_start_dump, /* Debugging. */
459 df_scan_start_block, /* Debugging start block. */
460 NULL, /* Debugging end block. */
ea9538fb 461 NULL, /* Debugging start insn. */
462 NULL, /* Debugging end insn. */
3072d30e 463 NULL, /* Incremental solution verify start. */
6dfdc153 464 NULL, /* Incremental solution verify end. */
3e6933a8 465 NULL, /* Dependent problem. */
369ea98d 466 sizeof (struct df_scan_bb_info),/* Size of entry of block_info array. */
deb2741b 467 TV_DF_SCAN, /* Timing variable. */
468 false /* Reset blocks on dropping out of blocks_to_analyze. */
e011eba9 469};
470
471
472/* Create a new DATAFLOW instance and add it to an existing instance
473 of DF. The returned structure is what is used to get at the
474 solution. */
475
3072d30e 476void
477df_scan_add_problem (void)
e011eba9 478{
3072d30e 479 df_add_problem (&problem_SCAN);
e011eba9 480}
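/* Rough sketch of the start-up sequence (as wired up in df-core.c,
   rest_of_handle_df_initialize; shown here only for orientation):

       df_scan_add_problem ();
       df_scan_alloc (NULL);
       ... other problems are added ...
       df_scan_blocks ();

   after which every insn and every artificial def/use in the function
   has been recorded.  */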
481
3072d30e 482\f
e011eba9 483/*----------------------------------------------------------------------------
484 Storage Allocation Utilities
485----------------------------------------------------------------------------*/
486
487
488/* First, grow the reg_info information. If the current size is less than
f0b5f617 489 the number of pseudos, grow to 25% more than the number of
48e1416a 490 pseudos.
e011eba9 491
492 Second, assure that all of the slots up to max_reg_num have been
493 filled with reg_info structures. */
494
48e1416a 495void
3072d30e 496df_grow_reg_info (void)
e011eba9 497{
498 unsigned int max_reg = max_reg_num ();
499 unsigned int new_size = max_reg;
3e6933a8 500 struct df_scan_problem_data *problem_data
3072d30e 501 = (struct df_scan_problem_data *) df_scan->problem_data;
e011eba9 502 unsigned int i;
503
3072d30e 504 if (df->regs_size < new_size)
e011eba9 505 {
506 new_size += new_size / 4;
364c0c59 507 df->def_regs = XRESIZEVEC (struct df_reg_info *, df->def_regs, new_size);
508 df->use_regs = XRESIZEVEC (struct df_reg_info *, df->use_regs, new_size);
509 df->eq_use_regs = XRESIZEVEC (struct df_reg_info *, df->eq_use_regs,
510 new_size);
511 df->def_info.begin = XRESIZEVEC (unsigned, df->def_info.begin, new_size);
512 df->def_info.count = XRESIZEVEC (unsigned, df->def_info.count, new_size);
513 df->use_info.begin = XRESIZEVEC (unsigned, df->use_info.begin, new_size);
514 df->use_info.count = XRESIZEVEC (unsigned, df->use_info.count, new_size);
3072d30e 515 df->regs_size = new_size;
e011eba9 516 }
517
3072d30e 518 for (i = df->regs_inited; i < max_reg; i++)
e011eba9 519 {
3072d30e 520 struct df_reg_info *reg_info;
521
364c0c59 522 reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
3072d30e 523 memset (reg_info, 0, sizeof (struct df_reg_info));
524 df->def_regs[i] = reg_info;
364c0c59 525 reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
3072d30e 526 memset (reg_info, 0, sizeof (struct df_reg_info));
527 df->use_regs[i] = reg_info;
364c0c59 528 reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
e011eba9 529 memset (reg_info, 0, sizeof (struct df_reg_info));
3072d30e 530 df->eq_use_regs[i] = reg_info;
531 df->def_info.begin[i] = 0;
532 df->def_info.count[i] = 0;
533 df->use_info.begin[i] = 0;
534 df->use_info.count[i] = 0;
e011eba9 535 }
48e1416a 536
3072d30e 537 df->regs_inited = max_reg;
e011eba9 538}
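/* Worked example of the growth policy above: with df->regs_size == 300
   and max_reg_num () == 400, new_size starts at 400 and grows to
   400 + 400 / 4 == 500, so the arrays get 500 slots and the entries
   from regs_inited up to 399 are filled with zeroed df_reg_info
   records.  */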
539
540
541/* Grow the ref information. */
542
48e1416a 543static void
e011eba9 544df_grow_ref_info (struct df_ref_info *ref_info, unsigned int new_size)
545{
546 if (ref_info->refs_size < new_size)
547 {
ed6e85ae 548 ref_info->refs = XRESIZEVEC (df_ref, ref_info->refs, new_size);
e011eba9 549 memset (ref_info->refs + ref_info->refs_size, 0,
ed6e85ae 550 (new_size - ref_info->refs_size) *sizeof (df_ref));
e011eba9 551 ref_info->refs_size = new_size;
552 }
553}
554
555
3072d30e 556/* Check and grow the ref information if necessary. This routine
557 guarantees total_size + BITMAP_ADDEND amount of entries in refs
558 array. It updates ref_info->refs_size only and does not change
559 ref_info->total_size. */
560
561static void
48e1416a 562df_check_and_grow_ref_info (struct df_ref_info *ref_info,
3072d30e 563 unsigned bitmap_addend)
564{
565 if (ref_info->refs_size < ref_info->total_size + bitmap_addend)
566 {
567 int new_size = ref_info->total_size + bitmap_addend;
568 new_size += ref_info->total_size / 4;
569 df_grow_ref_info (ref_info, new_size);
570 }
571}
572
573
e011eba9 574/* Grow the insn information.  If the current size is less than the
575 number of instructions, grow to 25% more than the number of
576 instructions. */
577
48e1416a 578void
3072d30e 579df_grow_insn_info (void)
e011eba9 580{
581 unsigned int new_size = get_max_uid () + 1;
3072d30e 582 if (DF_INSN_SIZE () < new_size)
e011eba9 583 {
584 new_size += new_size / 4;
364c0c59 585 df->insns = XRESIZEVEC (struct df_insn_info *, df->insns, new_size);
e011eba9 586 memset (df->insns + df->insns_size, 0,
3072d30e 587 (new_size - DF_INSN_SIZE ()) *sizeof (struct df_insn_info *));
588 DF_INSN_SIZE () = new_size;
e011eba9 589 }
590}
591
592
593
594\f
595/*----------------------------------------------------------------------------
596 PUBLIC INTERFACES FOR SMALL GRAIN CHANGES TO SCANNING.
597----------------------------------------------------------------------------*/
598
3072d30e 599/* Rescan all of the blocks in blocks_to_analyze, or all of the blocks
 600 in the function if blocks_to_analyze is NULL. */
e011eba9 601
602void
3072d30e 603df_scan_blocks (void)
e011eba9 604{
e011eba9 605 basic_block bb;
606
3072d30e 607 df->def_info.ref_order = DF_REF_ORDER_NO_TABLE;
608 df->use_info.ref_order = DF_REF_ORDER_NO_TABLE;
3e6933a8 609
4b5a4301 610 df_get_regular_block_artificial_uses (&df->regular_block_artificial_uses);
611 df_get_eh_block_artificial_uses (&df->eh_block_artificial_uses);
e011eba9 612
4b5a4301 613 bitmap_ior_into (&df->eh_block_artificial_uses,
614 &df->regular_block_artificial_uses);
f64e6a69 615
3072d30e 616 /* ENTRY and EXIT blocks have special defs/uses. */
617 df_get_entry_block_def_set (df->entry_block_defs);
618 df_record_entry_block_defs (df->entry_block_defs);
619 df_get_exit_block_use_set (df->exit_block_uses);
620 df_record_exit_block_uses (df->exit_block_uses);
f5a6b05f 621 df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK));
622 df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK));
e011eba9 623
3072d30e 624 /* Regular blocks */
fc00614f 625 FOR_EACH_BB_FN (bb, cfun)
e011eba9 626 {
3072d30e 627 unsigned int bb_index = bb->index;
628 df_bb_refs_record (bb_index, true);
e011eba9 629 }
e011eba9 630}
631
4ffe0526 632/* Create new refs under address LOC within INSN. This function is
633 only used externally. REF_FLAGS must be either 0 or DF_REF_IN_NOTE,
634 depending on whether LOC is inside PATTERN (INSN) or a note. */
635
636void
e149ca56 637df_uses_create (rtx *loc, rtx_insn *insn, int ref_flags)
4ffe0526 638{
639 gcc_assert (!(ref_flags & ~DF_REF_IN_NOTE));
640 df_uses_record (NULL, loc, DF_REF_REG_USE,
641 BLOCK_FOR_INSN (insn),
642 DF_INSN_INFO_GET (insn),
643 ref_flags);
644}
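/* Illustrative call (NOTE and INSN are hypothetical): a pass that has
   just attached a REG_EQUAL note mentioning registers could record the
   uses with

       df_uses_create (&XEXP (note, 0), insn, DF_REF_IN_NOTE);

   while uses inside PATTERN (insn) itself would be recorded with a
   ref_flags of 0.  */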
3e6933a8 645
4ffe0526 646static void
647df_install_ref_incremental (df_ref ref)
648{
649 struct df_reg_info **reg_info;
650 struct df_ref_info *ref_info;
ddc2d0e3 651 df_ref *ref_ptr;
4ffe0526 652 bool add_to_table;
653
7a6a083c 654 rtx_insn *insn = DF_REF_INSN (ref);
4ffe0526 655 basic_block bb = BLOCK_FOR_INSN (insn);
e011eba9 656
ed6e85ae 657 if (DF_REF_REG_DEF_P (ref))
3072d30e 658 {
659 reg_info = df->def_regs;
660 ref_info = &df->def_info;
ddc2d0e3 661 ref_ptr = &DF_INSN_DEFS (insn);
3072d30e 662 add_to_table = ref_info->ref_order != DF_REF_ORDER_NO_TABLE;
663 }
664 else if (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE)
665 {
666 reg_info = df->eq_use_regs;
667 ref_info = &df->use_info;
ddc2d0e3 668 ref_ptr = &DF_INSN_EQ_USES (insn);
3072d30e 669 switch (ref_info->ref_order)
670 {
671 case DF_REF_ORDER_UNORDERED_WITH_NOTES:
672 case DF_REF_ORDER_BY_REG_WITH_NOTES:
673 case DF_REF_ORDER_BY_INSN_WITH_NOTES:
674 add_to_table = true;
675 break;
676 default:
677 add_to_table = false;
678 break;
679 }
680 }
681 else
682 {
683 reg_info = df->use_regs;
684 ref_info = &df->use_info;
ddc2d0e3 685 ref_ptr = &DF_INSN_USES (insn);
3072d30e 686 add_to_table = ref_info->ref_order != DF_REF_ORDER_NO_TABLE;
687 }
e011eba9 688
3072d30e 689 /* Do not add if ref is not in the right blocks. */
690 if (add_to_table && df->analyze_subset)
691 add_to_table = bitmap_bit_p (df->blocks_to_analyze, bb->index);
e011eba9 692
3072d30e 693 df_install_ref (ref, reg_info[DF_REF_REGNO (ref)], ref_info, add_to_table);
48e1416a 694
3072d30e 695 if (add_to_table)
696 switch (ref_info->ref_order)
697 {
698 case DF_REF_ORDER_UNORDERED_WITH_NOTES:
699 case DF_REF_ORDER_BY_REG_WITH_NOTES:
700 case DF_REF_ORDER_BY_INSN_WITH_NOTES:
701 ref_info->ref_order = DF_REF_ORDER_UNORDERED_WITH_NOTES;
702 break;
703 default:
704 ref_info->ref_order = DF_REF_ORDER_UNORDERED;
705 break;
706 }
e011eba9 707
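  /* Splice the new ref into the insn's location chain, keeping the
     chain sorted according to df_ref_compare.  */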
ddc2d0e3 708 while (*ref_ptr && df_ref_compare (*ref_ptr, ref) < 0)
709 ref_ptr = &DF_REF_NEXT_LOC (*ref_ptr);
e011eba9 710
ddc2d0e3 711 DF_REF_NEXT_LOC (ref) = *ref_ptr;
712 *ref_ptr = ref;
e011eba9 713
3072d30e 714#if 0
715 if (dump_file)
716 {
717 fprintf (dump_file, "adding ref ");
718 df_ref_debug (ref, dump_file);
e011eba9 719 }
3072d30e 720#endif
 721 /* By adding the ref directly, df_insn_rescan may not find any
722 differences even though the block will have changed. So we need
48e1416a 723 to mark the block dirty ourselves. */
890dcf62 724 if (!DEBUG_INSN_P (DF_REF_INSN (ref)))
725 df_set_bb_dirty (bb);
e011eba9 726}
727
728
3072d30e 729\f
730/*----------------------------------------------------------------------------
731 UTILITIES TO CREATE AND DESTROY REFS AND CHAINS.
732----------------------------------------------------------------------------*/
733
30de5b55 734static void
ed6e85ae 735df_free_ref (df_ref ref)
30de5b55 736{
737 struct df_scan_problem_data *problem_data
738 = (struct df_scan_problem_data *) df_scan->problem_data;
739
ed6e85ae 740 switch (DF_REF_CLASS (ref))
741 {
742 case DF_REF_BASE:
743 pool_free (problem_data->ref_base_pool, ref);
744 break;
745
746 case DF_REF_ARTIFICIAL:
747 pool_free (problem_data->ref_artificial_pool, ref);
748 break;
749
750 case DF_REF_REGULAR:
751 pool_free (problem_data->ref_regular_pool, ref);
752 break;
ed6e85ae 753 }
30de5b55 754}
755
e011eba9 756
3072d30e 757/* Unlink and delete REF at the reg_use, reg_eq_use or reg_def chain.
758 Also delete the def-use or use-def chain if it exists. */
759
760static void
48e1416a 761df_reg_chain_unlink (df_ref ref)
e011eba9 762{
48e1416a 763 df_ref next = DF_REF_NEXT_REG (ref);
ed6e85ae 764 df_ref prev = DF_REF_PREV_REG (ref);
3072d30e 765 int id = DF_REF_ID (ref);
e011eba9 766 struct df_reg_info *reg_info;
ed6e85ae 767 df_ref *refs = NULL;
e011eba9 768
ed6e85ae 769 if (DF_REF_REG_DEF_P (ref))
e011eba9 770 {
ed6e85ae 771 int regno = DF_REF_REGNO (ref);
772 reg_info = DF_REG_DEF_GET (regno);
3072d30e 773 refs = df->def_info.refs;
e011eba9 774 }
48e1416a 775 else
e011eba9 776 {
3072d30e 777 if (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE)
778 {
779 reg_info = DF_REG_EQ_USE_GET (DF_REF_REGNO (ref));
780 switch (df->use_info.ref_order)
781 {
782 case DF_REF_ORDER_UNORDERED_WITH_NOTES:
783 case DF_REF_ORDER_BY_REG_WITH_NOTES:
784 case DF_REF_ORDER_BY_INSN_WITH_NOTES:
785 refs = df->use_info.refs;
786 break;
787 default:
788 break;
789 }
790 }
791 else
792 {
793 reg_info = DF_REG_USE_GET (DF_REF_REGNO (ref));
794 refs = df->use_info.refs;
795 }
796 }
797
798 if (refs)
799 {
800 if (df->analyze_subset)
801 {
ed6e85ae 802 if (bitmap_bit_p (df->blocks_to_analyze, DF_REF_BBNO (ref)))
3072d30e 803 refs[id] = NULL;
804 }
805 else
806 refs[id] = NULL;
e011eba9 807 }
48e1416a 808
3072d30e 809 /* Delete any def-use or use-def chains that start here. It is
810 possible that there is trash in this field. This happens for
811 insns that have been deleted when rescanning has been deferred
812 and the chain problem has also been deleted. The chain tear down
813 code skips deleted insns. */
814 if (df_chain && DF_REF_CHAIN (ref))
815 df_chain_unlink (ref);
48e1416a 816
e011eba9 817 reg_info->n_refs--;
3072d30e 818 if (DF_REF_FLAGS_IS_SET (ref, DF_HARD_REG_LIVE))
819 {
820 gcc_assert (DF_REF_REGNO (ref) < FIRST_PSEUDO_REGISTER);
821 df->hard_regs_live_count[DF_REF_REGNO (ref)]--;
822 }
e011eba9 823
824 /* Unlink from the reg chain. If there is no prev, this is the
825 first of the list. If not, just join the next and prev. */
826 if (prev)
3072d30e 827 DF_REF_NEXT_REG (prev) = next;
e011eba9 828 else
829 {
3072d30e 830 gcc_assert (reg_info->reg_chain == ref);
e011eba9 831 reg_info->reg_chain = next;
e011eba9 832 }
3072d30e 833 if (next)
834 DF_REF_PREV_REG (next) = prev;
e011eba9 835
30de5b55 836 df_free_ref (ref);
3072d30e 837}
838
839
3072d30e 840/* Create the insn record for INSN. If there was one there, zero it
841 out. */
e011eba9 842
3072d30e 843struct df_insn_info *
e149ca56 844df_insn_create_insn_record (rtx_insn *insn)
e011eba9 845{
3e6933a8 846 struct df_scan_problem_data *problem_data
3072d30e 847 = (struct df_scan_problem_data *) df_scan->problem_data;
848 struct df_insn_info *insn_rec;
e011eba9 849
3072d30e 850 df_grow_insn_info ();
158b6cc9 851 insn_rec = DF_INSN_INFO_GET (insn);
e011eba9 852 if (!insn_rec)
853 {
364c0c59 854 insn_rec = (struct df_insn_info *) pool_alloc (problem_data->insn_pool);
158b6cc9 855 DF_INSN_INFO_SET (insn, insn_rec);
e011eba9 856 }
857 memset (insn_rec, 0, sizeof (struct df_insn_info));
3072d30e 858 insn_rec->insn = insn;
e011eba9 859 return insn_rec;
860}
861
d0802b39 862
3072d30e 863/* Delete all du chains (DF_REF_CHAIN ()) of all refs in the ref chain. */
e011eba9 864
3072d30e 865static void
ddc2d0e3 866df_ref_chain_delete_du_chain (df_ref ref)
e011eba9 867{
ddc2d0e3 868 for (; ref; ref = DF_REF_NEXT_LOC (ref))
869 /* CHAIN is allocated by DF_CHAIN. So make sure to
870 pass df_scan instance for the problem. */
871 if (DF_REF_CHAIN (ref))
872 df_chain_unlink (ref);
3072d30e 873}
3e6933a8 874
e011eba9 875
3072d30e 876/* Delete all refs in the ref chain. */
877
878static void
ddc2d0e3 879df_ref_chain_delete (df_ref ref)
3072d30e 880{
ddc2d0e3 881 df_ref next;
882 for (; ref; ref = next)
3072d30e 883 {
ddc2d0e3 884 next = DF_REF_NEXT_LOC (ref);
885 df_reg_chain_unlink (ref);
3072d30e 886 }
3072d30e 887}
888
889
890/* Delete the hardreg chain. */
891
892static void
ddc2d0e3 893df_mw_hardreg_chain_delete (struct df_mw_hardreg *hardregs)
3072d30e 894{
ddc2d0e3 895 struct df_scan_problem_data *problem_data
896 = (struct df_scan_problem_data *) df_scan->problem_data;
897 df_mw_hardreg *next;
3072d30e 898
ddc2d0e3 899 for (; hardregs; hardregs = next)
3072d30e 900 {
ddc2d0e3 901 next = DF_MWS_NEXT (hardregs);
902 pool_free (problem_data->mw_reg_pool, hardregs);
3072d30e 903 }
904}
905
906
b983ea33 907/* Delete all of the refs information from the insn with UID.
908 Internal helper for df_insn_delete, df_insn_rescan, and other
909 df-scan routines that don't have to work in deferred mode
910 and do not have to mark basic blocks for re-processing. */
3072d30e 911
b983ea33 912static void
913df_insn_info_delete (unsigned int uid)
3072d30e 914{
b983ea33 915 struct df_insn_info *insn_info = DF_INSN_UID_SAFE_GET (uid);
3072d30e 916
4b5a4301 917 bitmap_clear_bit (&df->insns_to_delete, uid);
918 bitmap_clear_bit (&df->insns_to_rescan, uid);
919 bitmap_clear_bit (&df->insns_to_notes_rescan, uid);
3072d30e 920 if (insn_info)
921 {
48e1416a 922 struct df_scan_problem_data *problem_data
3072d30e 923 = (struct df_scan_problem_data *) df_scan->problem_data;
924
925 /* In general, notes do not have the insn_info fields
926 initialized. However, combine deletes insns by changing them
927 to notes. How clever. So we cannot just check if it is a
928 valid insn before short circuiting this code, we need to see
929 if we actually initialized it. */
ddc2d0e3 930 df_mw_hardreg_chain_delete (insn_info->mw_hardregs);
931
932 if (df_chain)
3072d30e 933 {
ddc2d0e3 934 df_ref_chain_delete_du_chain (insn_info->defs);
935 df_ref_chain_delete_du_chain (insn_info->uses);
936 df_ref_chain_delete_du_chain (insn_info->eq_uses);
937 }
48e1416a 938
ddc2d0e3 939 df_ref_chain_delete (insn_info->defs);
940 df_ref_chain_delete (insn_info->uses);
941 df_ref_chain_delete (insn_info->eq_uses);
48e1416a 942
e011eba9 943 pool_free (problem_data->insn_pool, insn_info);
3072d30e 944 DF_INSN_UID_SET (uid, NULL);
e011eba9 945 }
946}
947
b983ea33 948/* Delete all of the refs information from INSN, either right now
949 or marked for later in deferred mode. */
950
951void
e149ca56 952df_insn_delete (rtx_insn *insn)
b983ea33 953{
954 unsigned int uid;
955 basic_block bb;
956
957 gcc_checking_assert (INSN_P (insn));
958
959 if (!df)
960 return;
961
962 uid = INSN_UID (insn);
963 bb = BLOCK_FOR_INSN (insn);
964
965 /* ??? bb can be NULL after pass_free_cfg. At that point, DF should
966 not exist anymore (as mentioned in df-core.c: "The only requirement
967 [for DF] is that there be a correct control flow graph." Clearly
968 that isn't the case after pass_free_cfg. But DF is freed much later
969 because some back-ends want to use DF info even though the CFG is
970 already gone. It's not clear to me whether that is safe, actually.
971 In any case, we expect BB to be non-NULL at least up to register
972 allocation, so disallow a non-NULL BB up to there. Not perfect
973 but better than nothing... */
b983ea33 974 gcc_checking_assert (bb != NULL || reload_completed);
975
976 df_grow_bb_info (df_scan);
977 df_grow_reg_info ();
978
979 /* The block must be marked as dirty now, rather than later as in
980 df_insn_rescan and df_notes_rescan because it may not be there at
981 rescanning time and the mark would blow up.
982 DEBUG_INSNs do not make a block's data flow solution dirty (at
983 worst the LUIDs are no longer contiguous). */
984 if (bb != NULL && NONDEBUG_INSN_P (insn))
985 df_set_bb_dirty (bb);
986
987 /* The client has deferred rescanning. */
988 if (df->changeable_flags & DF_DEFER_INSN_RESCAN)
989 {
990 struct df_insn_info *insn_info = DF_INSN_UID_SAFE_GET (uid);
991 if (insn_info)
992 {
993 bitmap_clear_bit (&df->insns_to_rescan, uid);
994 bitmap_clear_bit (&df->insns_to_notes_rescan, uid);
995 bitmap_set_bit (&df->insns_to_delete, uid);
996 }
997 if (dump_file)
998 fprintf (dump_file, "deferring deletion of insn with uid = %d.\n", uid);
999 return;
1000 }
1001
1002 if (dump_file)
1003 fprintf (dump_file, "deleting insn with uid = %d.\n", uid);
1004
1005 df_insn_info_delete (uid);
1006}
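/* Sketch of the deferred-deletion protocol described above (INSN1 and
   INSN2 are hypothetical; df_process_deferred_rescans temporarily
   clears the flag itself):

       df_set_flags (DF_DEFER_INSN_RESCAN);
       df_insn_delete (insn1);            <- only queued in insns_to_delete
       df_insn_delete (insn2);
       df_process_deferred_rescans ();    <- performs the actual deletions

   Without DF_DEFER_INSN_RESCAN, df_insn_delete calls
   df_insn_info_delete immediately.  */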
1007
e011eba9 1008
3072d30e 1009/* Free all of the refs and the mw_hardregs in COLLECTION_REC. */
d0802b39 1010
3072d30e 1011static void
1012df_free_collection_rec (struct df_collection_rec *collection_rec)
d0802b39 1013{
f7f05a07 1014 unsigned int ix;
48e1416a 1015 struct df_scan_problem_data *problem_data
3072d30e 1016 = (struct df_scan_problem_data *) df_scan->problem_data;
f7f05a07 1017 df_ref ref;
1018 struct df_mw_hardreg *mw;
1019
f1f41a6c 1020 FOR_EACH_VEC_ELT (collection_rec->def_vec, ix, ref)
f7f05a07 1021 df_free_ref (ref);
f1f41a6c 1022 FOR_EACH_VEC_ELT (collection_rec->use_vec, ix, ref)
f7f05a07 1023 df_free_ref (ref);
f1f41a6c 1024 FOR_EACH_VEC_ELT (collection_rec->eq_use_vec, ix, ref)
f7f05a07 1025 df_free_ref (ref);
f1f41a6c 1026 FOR_EACH_VEC_ELT (collection_rec->mw_vec, ix, mw)
f7f05a07 1027 pool_free (problem_data->mw_reg_pool, mw);
1028
f1f41a6c 1029 collection_rec->def_vec.release ();
1030 collection_rec->use_vec.release ();
1031 collection_rec->eq_use_vec.release ();
1032 collection_rec->mw_vec.release ();
3072d30e 1033}
d0802b39 1034
3072d30e 1035/* Rescan INSN. Return TRUE if the rescanning produced any changes. */
1036
48e1416a 1037bool
e149ca56 1038df_insn_rescan (rtx_insn *insn)
3072d30e 1039{
1040 unsigned int uid = INSN_UID (insn);
1041 struct df_insn_info *insn_info = NULL;
1042 basic_block bb = BLOCK_FOR_INSN (insn);
1043 struct df_collection_rec collection_rec;
3072d30e 1044
1045 if ((!df) || (!INSN_P (insn)))
1046 return false;
1047
1048 if (!bb)
d0802b39 1049 {
3072d30e 1050 if (dump_file)
1051 fprintf (dump_file, "no bb for insn with uid = %d.\n", uid);
1052 return false;
1053 }
1054
1055 /* The client has disabled rescanning and plans to do it itself. */
1056 if (df->changeable_flags & DF_NO_INSN_RESCAN)
1057 return false;
1058
1059 df_grow_bb_info (df_scan);
1060 df_grow_reg_info ();
1061
1062 insn_info = DF_INSN_UID_SAFE_GET (uid);
1063
a3de79f9 1064 /* The client has deferred rescanning. */
3072d30e 1065 if (df->changeable_flags & DF_DEFER_INSN_RESCAN)
1066 {
1067 if (!insn_info)
1068 {
1069 insn_info = df_insn_create_insn_record (insn);
ddc2d0e3 1070 insn_info->defs = 0;
1071 insn_info->uses = 0;
1072 insn_info->eq_uses = 0;
1073 insn_info->mw_hardregs = 0;
3072d30e 1074 }
1075 if (dump_file)
bff9eb26 1076 fprintf (dump_file, "deferring rescan insn with uid = %d.\n", uid);
48e1416a 1077
4b5a4301 1078 bitmap_clear_bit (&df->insns_to_delete, uid);
1079 bitmap_clear_bit (&df->insns_to_notes_rescan, uid);
1080 bitmap_set_bit (&df->insns_to_rescan, INSN_UID (insn));
3072d30e 1081 return false;
1082 }
1083
4b5a4301 1084 bitmap_clear_bit (&df->insns_to_delete, uid);
1085 bitmap_clear_bit (&df->insns_to_rescan, uid);
1086 bitmap_clear_bit (&df->insns_to_notes_rescan, uid);
3072d30e 1087 if (insn_info)
1088 {
2e81afe5 1089 int luid;
3072d30e 1090 bool the_same = df_insn_refs_verify (&collection_rec, bb, insn, false);
1091 /* If there's no change, return false. */
1092 if (the_same)
d0802b39 1093 {
3072d30e 1094 df_free_collection_rec (&collection_rec);
1095 if (dump_file)
1096 fprintf (dump_file, "verify found no changes in insn with uid = %d.\n", uid);
1097 return false;
d0802b39 1098 }
3072d30e 1099 if (dump_file)
1100 fprintf (dump_file, "rescanning insn with uid = %d.\n", uid);
1101
2e81afe5 1102 /* There's change - we need to delete the existing info.
1103 Since the insn isn't moved, we can salvage its LUID. */
1104 luid = DF_INSN_LUID (insn);
b983ea33 1105 df_insn_info_delete (uid);
3072d30e 1106 df_insn_create_insn_record (insn);
2e81afe5 1107 DF_INSN_LUID (insn) = luid;
3072d30e 1108 }
1109 else
1110 {
158b6cc9 1111 struct df_insn_info *insn_info = df_insn_create_insn_record (insn);
1112 df_insn_refs_collect (&collection_rec, bb, insn_info);
3072d30e 1113 if (dump_file)
1114 fprintf (dump_file, "scanning new insn with uid = %d.\n", uid);
1115 }
1116
d70aebca 1117 df_refs_add_to_chains (&collection_rec, bb, insn, copy_all);
0a7642a1 1118 if (!DEBUG_INSN_P (insn))
86fc6921 1119 df_set_bb_dirty (bb);
f7f05a07 1120
3072d30e 1121 return true;
1122}
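/* Typical external use (SET and NEW_SRC are hypothetical): a pass that
   rewrites an insn in place re-synchronizes the refs with something
   like

       if (validate_change (insn, &SET_SRC (set), new_src, false))
         df_insn_rescan (insn);

   The rescan verifies the freshly collected refs against the existing
   ones and only rebuilds the chains (and dirties the block) when they
   actually differ.  */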
1123
9845d120 1124/* Same as df_insn_rescan, but don't mark the basic block as
1125 dirty. */
1126
1127bool
e149ca56 1128df_insn_rescan_debug_internal (rtx_insn *insn)
9845d120 1129{
1130 unsigned int uid = INSN_UID (insn);
1131 struct df_insn_info *insn_info;
1132
76d2e170 1133 gcc_assert (DEBUG_INSN_P (insn)
1134 && VAR_LOC_UNKNOWN_P (INSN_VAR_LOCATION_LOC (insn)));
9845d120 1135
1136 if (!df)
1137 return false;
1138
1139 insn_info = DF_INSN_UID_SAFE_GET (INSN_UID (insn));
1140 if (!insn_info)
1141 return false;
1142
1143 if (dump_file)
1144 fprintf (dump_file, "deleting debug_insn with uid = %d.\n", uid);
1145
4b5a4301 1146 bitmap_clear_bit (&df->insns_to_delete, uid);
1147 bitmap_clear_bit (&df->insns_to_rescan, uid);
1148 bitmap_clear_bit (&df->insns_to_notes_rescan, uid);
9845d120 1149
ddc2d0e3 1150 if (insn_info->defs == 0
1151 && insn_info->uses == 0
1152 && insn_info->eq_uses == 0
1153 && insn_info->mw_hardregs == 0)
9845d120 1154 return false;
1155
1156 df_mw_hardreg_chain_delete (insn_info->mw_hardregs);
1157
1158 if (df_chain)
1159 {
1160 df_ref_chain_delete_du_chain (insn_info->defs);
1161 df_ref_chain_delete_du_chain (insn_info->uses);
1162 df_ref_chain_delete_du_chain (insn_info->eq_uses);
1163 }
1164
1165 df_ref_chain_delete (insn_info->defs);
1166 df_ref_chain_delete (insn_info->uses);
1167 df_ref_chain_delete (insn_info->eq_uses);
1168
ddc2d0e3 1169 insn_info->defs = 0;
1170 insn_info->uses = 0;
1171 insn_info->eq_uses = 0;
1172 insn_info->mw_hardregs = 0;
9845d120 1173
1174 return true;
1175}
1176
3072d30e 1177
1178/* Rescan all of the insns in the function. Note that the artificial
b983ea33 1179 uses and defs are not touched. This function will destroy def-use
3072d30e 1180 or use-def chains. */
1181
1182void
1183df_insn_rescan_all (void)
1184{
1185 bool no_insn_rescan = false;
1186 bool defer_insn_rescan = false;
1187 basic_block bb;
1188 bitmap_iterator bi;
1189 unsigned int uid;
4b5a4301 1190 bitmap_head tmp;
1191
1192 bitmap_initialize (&tmp, &df_bitmap_obstack);
48e1416a 1193
3072d30e 1194 if (df->changeable_flags & DF_NO_INSN_RESCAN)
1195 {
1196 df_clear_flags (DF_NO_INSN_RESCAN);
1197 no_insn_rescan = true;
d0802b39 1198 }
48e1416a 1199
3072d30e 1200 if (df->changeable_flags & DF_DEFER_INSN_RESCAN)
d0802b39 1201 {
3072d30e 1202 df_clear_flags (DF_DEFER_INSN_RESCAN);
1203 defer_insn_rescan = true;
1204 }
1205
4b5a4301 1206 bitmap_copy (&tmp, &df->insns_to_delete);
1207 EXECUTE_IF_SET_IN_BITMAP (&tmp, 0, uid, bi)
3072d30e 1208 {
1209 struct df_insn_info *insn_info = DF_INSN_UID_SAFE_GET (uid);
1210 if (insn_info)
b983ea33 1211 df_insn_info_delete (uid);
d0802b39 1212 }
3072d30e 1213
4b5a4301 1214 bitmap_clear (&tmp);
1215 bitmap_clear (&df->insns_to_delete);
1216 bitmap_clear (&df->insns_to_rescan);
1217 bitmap_clear (&df->insns_to_notes_rescan);
3072d30e 1218
fc00614f 1219 FOR_EACH_BB_FN (bb, cfun)
3072d30e 1220 {
7a6a083c 1221 rtx_insn *insn;
3072d30e 1222 FOR_BB_INSNS (bb, insn)
1223 {
1224 df_insn_rescan (insn);
1225 }
1226 }
1227
1228 if (no_insn_rescan)
1229 df_set_flags (DF_NO_INSN_RESCAN);
1230 if (defer_insn_rescan)
1231 df_set_flags (DF_DEFER_INSN_RESCAN);
d0802b39 1232}
1233
1234
a3de79f9 1235/* Process all of the deferred rescans or deletions. */
e011eba9 1236
3072d30e 1237void
1238df_process_deferred_rescans (void)
e011eba9 1239{
3072d30e 1240 bool no_insn_rescan = false;
1241 bool defer_insn_rescan = false;
e011eba9 1242 bitmap_iterator bi;
3072d30e 1243 unsigned int uid;
4b5a4301 1244 bitmap_head tmp;
1245
1246 bitmap_initialize (&tmp, &df_bitmap_obstack);
48e1416a 1247
3072d30e 1248 if (df->changeable_flags & DF_NO_INSN_RESCAN)
1249 {
1250 df_clear_flags (DF_NO_INSN_RESCAN);
1251 no_insn_rescan = true;
1252 }
48e1416a 1253
3072d30e 1254 if (df->changeable_flags & DF_DEFER_INSN_RESCAN)
1255 {
1256 df_clear_flags (DF_DEFER_INSN_RESCAN);
1257 defer_insn_rescan = true;
1258 }
1259
1260 if (dump_file)
a3de79f9 1261 fprintf (dump_file, "starting the processing of deferred insns\n");
3072d30e 1262
4b5a4301 1263 bitmap_copy (&tmp, &df->insns_to_delete);
1264 EXECUTE_IF_SET_IN_BITMAP (&tmp, 0, uid, bi)
3072d30e 1265 {
1266 struct df_insn_info *insn_info = DF_INSN_UID_SAFE_GET (uid);
1267 if (insn_info)
b983ea33 1268 df_insn_info_delete (uid);
3072d30e 1269 }
1270
4b5a4301 1271 bitmap_copy (&tmp, &df->insns_to_rescan);
1272 EXECUTE_IF_SET_IN_BITMAP (&tmp, 0, uid, bi)
3072d30e 1273 {
1274 struct df_insn_info *insn_info = DF_INSN_UID_SAFE_GET (uid);
1275 if (insn_info)
1276 df_insn_rescan (insn_info->insn);
1277 }
1278
4b5a4301 1279 bitmap_copy (&tmp, &df->insns_to_notes_rescan);
1280 EXECUTE_IF_SET_IN_BITMAP (&tmp, 0, uid, bi)
3072d30e 1281 {
1282 struct df_insn_info *insn_info = DF_INSN_UID_SAFE_GET (uid);
1283 if (insn_info)
1284 df_notes_rescan (insn_info->insn);
1285 }
1286
1287 if (dump_file)
a3de79f9 1288 fprintf (dump_file, "ending the processing of deferred insns\n");
3072d30e 1289
4b5a4301 1290 bitmap_clear (&tmp);
1291 bitmap_clear (&df->insns_to_delete);
1292 bitmap_clear (&df->insns_to_rescan);
1293 bitmap_clear (&df->insns_to_notes_rescan);
3072d30e 1294
1295 if (no_insn_rescan)
1296 df_set_flags (DF_NO_INSN_RESCAN);
1297 if (defer_insn_rescan)
1298 df_set_flags (DF_DEFER_INSN_RESCAN);
1299
1300 /* If someone changed regs_ever_live during this pass, fix up the
1301 entry and exit blocks. */
1302 if (df->redo_entry_and_exit)
1303 {
1304 df_update_entry_exit_and_calls ();
1305 df->redo_entry_and_exit = false;
1306 }
1307}
1308
e011eba9 1309
3072d30e 1310/* Count the number of refs. Include the defs if INCLUDE_DEFS. Include
1311 the uses if INCLUDE_USES. Include the eq_uses if
1312 INCLUDE_EQ_USES. */
1313
1314static unsigned int
48e1416a 1315df_count_refs (bool include_defs, bool include_uses,
3072d30e 1316 bool include_eq_uses)
1317{
1318 unsigned int regno;
1319 int size = 0;
1320 unsigned int m = df->regs_inited;
48e1416a 1321
3072d30e 1322 for (regno = 0; regno < m; regno++)
e011eba9 1323 {
3072d30e 1324 if (include_defs)
1325 size += DF_REG_DEF_COUNT (regno);
1326 if (include_uses)
1327 size += DF_REG_USE_COUNT (regno);
1328 if (include_eq_uses)
1329 size += DF_REG_EQ_USE_COUNT (regno);
e011eba9 1330 }
3072d30e 1331 return size;
e011eba9 1332}
1333
1334
 1335/* Build the ref table for either the uses or defs from the reg-use
3072d30e 1336 or reg-def chains. This version processes the refs in reg order
1337 which is likely to be best if processing the whole function. */
e011eba9 1338
48e1416a 1339static void
3072d30e 1340df_reorganize_refs_by_reg_by_reg (struct df_ref_info *ref_info,
48e1416a 1341 bool include_defs,
1342 bool include_uses,
3072d30e 1343 bool include_eq_uses)
e011eba9 1344{
3072d30e 1345 unsigned int m = df->regs_inited;
e011eba9 1346 unsigned int regno;
1347 unsigned int offset = 0;
3072d30e 1348 unsigned int start;
e011eba9 1349
3072d30e 1350 if (df->changeable_flags & DF_NO_HARD_REGS)
1351 {
1352 start = FIRST_PSEUDO_REGISTER;
1353 memset (ref_info->begin, 0, sizeof (int) * FIRST_PSEUDO_REGISTER);
1354 memset (ref_info->count, 0, sizeof (int) * FIRST_PSEUDO_REGISTER);
e011eba9 1355 }
3072d30e 1356 else
1357 start = 0;
e011eba9 1358
48e1416a 1359 ref_info->total_size
3072d30e 1360 = df_count_refs (include_defs, include_uses, include_eq_uses);
1361
1362 df_check_and_grow_ref_info (ref_info, 1);
1363
1364 for (regno = start; regno < m; regno++)
e011eba9 1365 {
e011eba9 1366 int count = 0;
3072d30e 1367 ref_info->begin[regno] = offset;
1368 if (include_defs)
1369 {
ed6e85ae 1370 df_ref ref = DF_REG_DEF_CHAIN (regno);
48e1416a 1371 while (ref)
3072d30e 1372 {
1373 ref_info->refs[offset] = ref;
1374 DF_REF_ID (ref) = offset++;
1375 count++;
1376 ref = DF_REF_NEXT_REG (ref);
0ea2d350 1377 gcc_checking_assert (offset < ref_info->refs_size);
3072d30e 1378 }
1379 }
1380 if (include_uses)
e011eba9 1381 {
ed6e85ae 1382 df_ref ref = DF_REG_USE_CHAIN (regno);
48e1416a 1383 while (ref)
e011eba9 1384 {
1385 ref_info->refs[offset] = ref;
1386 DF_REF_ID (ref) = offset++;
3072d30e 1387 count++;
e011eba9 1388 ref = DF_REF_NEXT_REG (ref);
0ea2d350 1389 gcc_checking_assert (offset < ref_info->refs_size);
3072d30e 1390 }
1391 }
1392 if (include_eq_uses)
1393 {
ed6e85ae 1394 df_ref ref = DF_REG_EQ_USE_CHAIN (regno);
48e1416a 1395 while (ref)
3072d30e 1396 {
1397 ref_info->refs[offset] = ref;
1398 DF_REF_ID (ref) = offset++;
e011eba9 1399 count++;
3072d30e 1400 ref = DF_REF_NEXT_REG (ref);
0ea2d350 1401 gcc_checking_assert (offset < ref_info->refs_size);
e011eba9 1402 }
e011eba9 1403 }
3072d30e 1404 ref_info->count[regno] = count;
e011eba9 1405 }
48e1416a 1406
e011eba9 1407 /* The bitmap size is not decremented when refs are deleted. So
1408 reset it now that we have squished out all of the empty
1409 slots. */
3072d30e 1410 ref_info->table_size = offset;
e011eba9 1411}
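/* Once the table is organized by register, consumers walk a register's
   slice through the begin/count arrays, e.g. (a sketch using the df.h
   accessors; note_def stands in for whatever the consumer does per ref):

       for (i = DF_DEFS_BEGIN (regno);
            i < DF_DEFS_BEGIN (regno) + DF_DEFS_COUNT (regno);
            i++)
         note_def (DF_DEFS_GET (i));

   so the refs for a single register are contiguous in the refs array.  */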
1412
e011eba9 1413
3072d30e 1414/* Build the ref table for either the uses or defs from the reg-use
1415 or reg-def chains. This version processes the refs in insn order
1416 which is likely to be best if processing some segment of the
1417 function. */
e011eba9 1418
48e1416a 1419static void
3072d30e 1420df_reorganize_refs_by_reg_by_insn (struct df_ref_info *ref_info,
48e1416a 1421 bool include_defs,
1422 bool include_uses,
3072d30e 1423 bool include_eq_uses)
e011eba9 1424{
3072d30e 1425 bitmap_iterator bi;
1426 unsigned int bb_index;
1427 unsigned int m = df->regs_inited;
1428 unsigned int offset = 0;
1429 unsigned int r;
48e1416a 1430 unsigned int start
3072d30e 1431 = (df->changeable_flags & DF_NO_HARD_REGS) ? FIRST_PSEUDO_REGISTER : 0;
e011eba9 1432
3072d30e 1433 memset (ref_info->begin, 0, sizeof (int) * df->regs_inited);
1434 memset (ref_info->count, 0, sizeof (int) * df->regs_inited);
1435
1436 ref_info->total_size = df_count_refs (include_defs, include_uses, include_eq_uses);
1437 df_check_and_grow_ref_info (ref_info, 1);
e011eba9 1438
3072d30e 1439 EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
e011eba9 1440 {
f5a6b05f 1441 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
7a6a083c 1442 rtx_insn *insn;
be10bb5a 1443 df_ref def, use;
3072d30e 1444
1445 if (include_defs)
f1c570a6 1446 FOR_EACH_ARTIFICIAL_DEF (def, bb_index)
3e6933a8 1447 {
f1c570a6 1448 unsigned int regno = DF_REF_REGNO (def);
3072d30e 1449 ref_info->count[regno]++;
3e6933a8 1450 }
3072d30e 1451 if (include_uses)
f1c570a6 1452 FOR_EACH_ARTIFICIAL_USE (use, bb_index)
3e6933a8 1453 {
f1c570a6 1454 unsigned int regno = DF_REF_REGNO (use);
3072d30e 1455 ref_info->count[regno]++;
3e6933a8 1456 }
e011eba9 1457
3072d30e 1458 FOR_BB_INSNS (bb, insn)
1459 {
1460 if (INSN_P (insn))
1461 {
be10bb5a 1462 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
48e1416a 1463
3072d30e 1464 if (include_defs)
be10bb5a 1465 FOR_EACH_INSN_INFO_DEF (def, insn_info)
3072d30e 1466 {
be10bb5a 1467 unsigned int regno = DF_REF_REGNO (def);
3072d30e 1468 ref_info->count[regno]++;
1469 }
1470 if (include_uses)
be10bb5a 1471 FOR_EACH_INSN_INFO_USE (use, insn_info)
3072d30e 1472 {
be10bb5a 1473 unsigned int regno = DF_REF_REGNO (use);
3072d30e 1474 ref_info->count[regno]++;
1475 }
1476 if (include_eq_uses)
be10bb5a 1477 FOR_EACH_INSN_INFO_EQ_USE (use, insn_info)
3072d30e 1478 {
be10bb5a 1479 unsigned int regno = DF_REF_REGNO (use);
3072d30e 1480 ref_info->count[regno]++;
1481 }
1482 }
1483 }
1484 }
1485
1486 for (r = start; r < m; r++)
1487 {
1488 ref_info->begin[r] = offset;
1489 offset += ref_info->count[r];
1490 ref_info->count[r] = 0;
1491 }
48e1416a 1492
3072d30e 1493 EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
1494 {
f5a6b05f 1495 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
7a6a083c 1496 rtx_insn *insn;
be10bb5a 1497 df_ref def, use;
3072d30e 1498
1499 if (include_defs)
f1c570a6 1500 FOR_EACH_ARTIFICIAL_DEF (def, bb_index)
3e6933a8 1501 {
f1c570a6 1502 unsigned int regno = DF_REF_REGNO (def);
3072d30e 1503 if (regno >= start)
3e6933a8 1504 {
3072d30e 1505 unsigned int id
1506 = ref_info->begin[regno] + ref_info->count[regno]++;
f1c570a6 1507 DF_REF_ID (def) = id;
1508 ref_info->refs[id] = def;
3e6933a8 1509 }
3e6933a8 1510 }
3072d30e 1511 if (include_uses)
f1c570a6 1512 FOR_EACH_ARTIFICIAL_USE (use, bb_index)
3e6933a8 1513 {
f1c570a6 1514	      unsigned int regno = DF_REF_REGNO (use);
3072d30e 1515 if (regno >= start)
1516 {
1517 unsigned int id
1518 = ref_info->begin[regno] + ref_info->count[regno]++;
f1c570a6 1519 DF_REF_ID (use) = id;
1520 ref_info->refs[id] = use;
3072d30e 1521 }
3e6933a8 1522 }
3072d30e 1523
1524 FOR_BB_INSNS (bb, insn)
1525 {
1526 if (INSN_P (insn))
1527 {
be10bb5a 1528 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
48e1416a 1529
3072d30e 1530 if (include_defs)
be10bb5a 1531 FOR_EACH_INSN_INFO_DEF (def, insn_info)
3072d30e 1532 {
be10bb5a 1533 unsigned int regno = DF_REF_REGNO (def);
3072d30e 1534 if (regno >= start)
1535 {
1536 unsigned int id
1537 = ref_info->begin[regno] + ref_info->count[regno]++;
be10bb5a 1538 DF_REF_ID (def) = id;
1539 ref_info->refs[id] = def;
3072d30e 1540 }
1541 }
1542 if (include_uses)
be10bb5a 1543 FOR_EACH_INSN_INFO_USE (use, insn_info)
3072d30e 1544 {
be10bb5a 1545 unsigned int regno = DF_REF_REGNO (use);
3072d30e 1546 if (regno >= start)
1547 {
1548 unsigned int id
1549 = ref_info->begin[regno] + ref_info->count[regno]++;
be10bb5a 1550 DF_REF_ID (use) = id;
1551 ref_info->refs[id] = use;
3072d30e 1552 }
1553 }
1554 if (include_eq_uses)
be10bb5a 1555 FOR_EACH_INSN_INFO_EQ_USE (use, insn_info)
3072d30e 1556 {
be10bb5a 1557 unsigned int regno = DF_REF_REGNO (use);
3072d30e 1558 if (regno >= start)
1559 {
1560 unsigned int id
1561 = ref_info->begin[regno] + ref_info->count[regno]++;
be10bb5a 1562 DF_REF_ID (use) = id;
1563 ref_info->refs[id] = use;
3072d30e 1564 }
1565 }
1566 }
1567 }
1568 }
1569
1570 /* The bitmap size is not decremented when refs are deleted. So
1571 reset it now that we have squished out all of the empty
1572 slots. */
1573
1574 ref_info->table_size = offset;
1575}
1576
 1577/* Build the ref table for either the uses or defs from the reg-use
1578 or reg-def chains. */
1579
48e1416a 1580static void
3072d30e 1581df_reorganize_refs_by_reg (struct df_ref_info *ref_info,
48e1416a 1582 bool include_defs,
1583 bool include_uses,
3072d30e 1584 bool include_eq_uses)
1585{
1586 if (df->analyze_subset)
48e1416a 1587 df_reorganize_refs_by_reg_by_insn (ref_info, include_defs,
3072d30e 1588 include_uses, include_eq_uses);
1589 else
48e1416a 1590 df_reorganize_refs_by_reg_by_reg (ref_info, include_defs,
3072d30e 1591 include_uses, include_eq_uses);
1592}
1593
1594
 1595/* Add the refs on the chain REF to the table in REF_INFO, starting at OFFSET. */
48e1416a 1596static unsigned int
1597df_add_refs_to_table (unsigned int offset,
1598 struct df_ref_info *ref_info,
ddc2d0e3 1599 df_ref ref)
3072d30e 1600{
ddc2d0e3 1601 for (; ref; ref = DF_REF_NEXT_LOC (ref))
1602 if (!(df->changeable_flags & DF_NO_HARD_REGS)
1603 || (DF_REF_REGNO (ref) >= FIRST_PSEUDO_REGISTER))
1604 {
1605 ref_info->refs[offset] = ref;
1606 DF_REF_ID (ref) = offset++;
1607 }
3072d30e 1608 return offset;
1609}
1610
1611
1612/* Count the number of refs in all of the insns of BB. Include the
1613 defs if INCLUDE_DEFS. Include the uses if INCLUDE_USES. Include the
1614 eq_uses if INCLUDE_EQ_USES. */
1615
1616static unsigned int
48e1416a 1617df_reorganize_refs_by_insn_bb (basic_block bb, unsigned int offset,
3072d30e 1618 struct df_ref_info *ref_info,
48e1416a 1619 bool include_defs, bool include_uses,
3072d30e 1620 bool include_eq_uses)
1621{
7a6a083c 1622 rtx_insn *insn;
3072d30e 1623
1624 if (include_defs)
48e1416a 1625 offset = df_add_refs_to_table (offset, ref_info,
3072d30e 1626 df_get_artificial_defs (bb->index));
1627 if (include_uses)
48e1416a 1628 offset = df_add_refs_to_table (offset, ref_info,
3072d30e 1629 df_get_artificial_uses (bb->index));
1630
1631 FOR_BB_INSNS (bb, insn)
1632 if (INSN_P (insn))
1633 {
1634 unsigned int uid = INSN_UID (insn);
1635 if (include_defs)
48e1416a 1636 offset = df_add_refs_to_table (offset, ref_info,
3072d30e 1637 DF_INSN_UID_DEFS (uid));
1638 if (include_uses)
48e1416a 1639 offset = df_add_refs_to_table (offset, ref_info,
3072d30e 1640 DF_INSN_UID_USES (uid));
1641 if (include_eq_uses)
48e1416a 1642 offset = df_add_refs_to_table (offset, ref_info,
3072d30e 1643 DF_INSN_UID_EQ_USES (uid));
3e6933a8 1644 }
3072d30e 1645 return offset;
1646}
1647
1648
bef304b8 1649/* Organize the refs by insn into the table in REF_INFO. If
3072d30e 1650 blocks_to_analyze is defined, use that set, otherwise the entire
1651 program. Include the defs if INCLUDE_DEFS. Include the uses if
1652 INCLUDE_USES. Include the eq_uses if INCLUDE_EQ_USES. */
1653
1654static void
1655df_reorganize_refs_by_insn (struct df_ref_info *ref_info,
48e1416a 1656 bool include_defs, bool include_uses,
3072d30e 1657 bool include_eq_uses)
1658{
1659 basic_block bb;
1660 unsigned int offset = 0;
1661
1662 ref_info->total_size = df_count_refs (include_defs, include_uses, include_eq_uses);
1663 df_check_and_grow_ref_info (ref_info, 1);
1664 if (df->blocks_to_analyze)
1665 {
1666 bitmap_iterator bi;
1667 unsigned int index;
1668
1669 EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, index, bi)
1670 {
f5a6b05f 1671 offset = df_reorganize_refs_by_insn_bb (BASIC_BLOCK_FOR_FN (cfun,
1672 index),
1673 offset, ref_info,
48e1416a 1674 include_defs, include_uses,
3072d30e 1675 include_eq_uses);
1676 }
1677
1678 ref_info->table_size = offset;
1679 }
1680 else
1681 {
ed7d889a 1682 FOR_ALL_BB_FN (bb, cfun)
48e1416a 1683 offset = df_reorganize_refs_by_insn_bb (bb, offset, ref_info,
1684 include_defs, include_uses,
3072d30e 1685 include_eq_uses);
1686 ref_info->table_size = offset;
1687 }
1688}
1689
1690
1691/* If the use refs in DF are not organized, reorganize them. */
1692
48e1416a 1693void
3072d30e 1694df_maybe_reorganize_use_refs (enum df_ref_order order)
1695{
1696 if (order == df->use_info.ref_order)
1697 return;
1698
1699 switch (order)
1700 {
1701 case DF_REF_ORDER_BY_REG:
1702 df_reorganize_refs_by_reg (&df->use_info, false, true, false);
1703 break;
1704
1705 case DF_REF_ORDER_BY_REG_WITH_NOTES:
1706 df_reorganize_refs_by_reg (&df->use_info, false, true, true);
1707 break;
1708
1709 case DF_REF_ORDER_BY_INSN:
1710 df_reorganize_refs_by_insn (&df->use_info, false, true, false);
1711 break;
1712
1713 case DF_REF_ORDER_BY_INSN_WITH_NOTES:
1714 df_reorganize_refs_by_insn (&df->use_info, false, true, true);
1715 break;
1716
1717 case DF_REF_ORDER_NO_TABLE:
1718 free (df->use_info.refs);
1719 df->use_info.refs = NULL;
1720 df->use_info.refs_size = 0;
1721 break;
1722
1723 case DF_REF_ORDER_UNORDERED:
1724 case DF_REF_ORDER_UNORDERED_WITH_NOTES:
1725 gcc_unreachable ();
1726 break;
1727 }
48e1416a 1728
3072d30e 1729 df->use_info.ref_order = order;
1730}
1731
1732
1733/* If the def refs in DF are not organized, reorganize them. */
1734
48e1416a 1735void
3072d30e 1736df_maybe_reorganize_def_refs (enum df_ref_order order)
1737{
1738 if (order == df->def_info.ref_order)
1739 return;
1740
1741 switch (order)
1742 {
1743 case DF_REF_ORDER_BY_REG:
1744 df_reorganize_refs_by_reg (&df->def_info, true, false, false);
1745 break;
1746
1747 case DF_REF_ORDER_BY_INSN:
1748 df_reorganize_refs_by_insn (&df->def_info, true, false, false);
1749 break;
1750
1751 case DF_REF_ORDER_NO_TABLE:
1752 free (df->def_info.refs);
1753 df->def_info.refs = NULL;
1754 df->def_info.refs_size = 0;
1755 break;
1756
1757 case DF_REF_ORDER_BY_INSN_WITH_NOTES:
1758 case DF_REF_ORDER_BY_REG_WITH_NOTES:
1759 case DF_REF_ORDER_UNORDERED:
1760 case DF_REF_ORDER_UNORDERED_WITH_NOTES:
1761 gcc_unreachable ();
1762 break;
1763 }
48e1416a 1764
3072d30e 1765 df->def_info.ref_order = order;
1766}
1767
1768
3072d30e 1769/* Change all of the basic block references in INSN to use the insn's
48e1416a 1770 current basic block. This function is called from routines that move
1771 instructions from one block to another. */
3072d30e 1772
1773void
e149ca56 1774df_insn_change_bb (rtx_insn *insn, basic_block new_bb)
3072d30e 1775{
a2bdd643 1776 basic_block old_bb = BLOCK_FOR_INSN (insn);
3072d30e 1777 struct df_insn_info *insn_info;
1778 unsigned int uid = INSN_UID (insn);
1779
a2bdd643 1780 if (old_bb == new_bb)
1781 return;
1782
1783 set_block_for_insn (insn, new_bb);
1784
3072d30e 1785 if (!df)
1786 return;
1787
1788 if (dump_file)
1789 fprintf (dump_file, "changing bb of uid %d\n", uid);
1790
1791 insn_info = DF_INSN_UID_SAFE_GET (uid);
1792 if (insn_info == NULL)
1793 {
1794 if (dump_file)
1795 fprintf (dump_file, " unscanned insn\n");
1796 df_insn_rescan (insn);
1797 return;
1798 }
1799
1800 if (!INSN_P (insn))
1801 return;
1802
3072d30e 1803 df_set_bb_dirty (new_bb);
1804 if (old_bb)
1805 {
1806 if (dump_file)
48e1416a 1807 fprintf (dump_file, " from %d to %d\n",
3072d30e 1808 old_bb->index, new_bb->index);
1809 df_set_bb_dirty (old_bb);
1810 }
1811 else
1812 if (dump_file)
1813 fprintf (dump_file, " to %d\n", new_bb->index);
1814}
1815
1816
1817/* Helper function for df_ref_change_reg_with_loc. */
1818
1819static void
48e1416a 1820df_ref_change_reg_with_loc_1 (struct df_reg_info *old_df,
ed6e85ae 1821 struct df_reg_info *new_df,
91f5b5cb 1822 unsigned int new_regno, rtx loc)
3072d30e 1823{
ed6e85ae 1824 df_ref the_ref = old_df->reg_chain;
3072d30e 1825
1826 while (the_ref)
1827 {
ed6e85ae 1828 if ((!DF_REF_IS_ARTIFICIAL (the_ref))
c989ecc1 1829 && DF_REF_LOC (the_ref)
ed6e85ae 1830 && (*DF_REF_LOC (the_ref) == loc))
3072d30e 1831 {
ed6e85ae 1832 df_ref next_ref = DF_REF_NEXT_REG (the_ref);
1833 df_ref prev_ref = DF_REF_PREV_REG (the_ref);
ddc2d0e3 1834 df_ref *ref_ptr;
ed6e85ae 1835 struct df_insn_info *insn_info = DF_REF_INSN_INFO (the_ref);
3072d30e 1836
1837 DF_REF_REGNO (the_ref) = new_regno;
1838 DF_REF_REG (the_ref) = regno_reg_rtx[new_regno];
1839
1840 /* Pull the_ref out of the old regno chain. */
1841 if (prev_ref)
ed6e85ae 1842 DF_REF_NEXT_REG (prev_ref) = next_ref;
3072d30e 1843 else
9ce37fa7 1844 old_df->reg_chain = next_ref;
3072d30e 1845 if (next_ref)
ed6e85ae 1846 DF_REF_PREV_REG (next_ref) = prev_ref;
9ce37fa7 1847 old_df->n_refs--;
3072d30e 1848
1849 /* Put the ref into the new regno chain. */
ed6e85ae 1850 DF_REF_PREV_REG (the_ref) = NULL;
1851 DF_REF_NEXT_REG (the_ref) = new_df->reg_chain;
9ce37fa7 1852 if (new_df->reg_chain)
ed6e85ae 1853 DF_REF_PREV_REG (new_df->reg_chain) = the_ref;
9ce37fa7 1854 new_df->reg_chain = the_ref;
1855 new_df->n_refs++;
a43c254c 1856 if (DF_REF_BB (the_ref))
1857 df_set_bb_dirty (DF_REF_BB (the_ref));
3072d30e 1858
ed6e85ae 1859      /* The record the ref was in needs to be re-sorted, because the
 1860         regno is a sorting key.  First, find the right record.  */
ddc2d0e3 1862 if (DF_REF_REG_DEF_P (the_ref))
1863 ref_ptr = &insn_info->defs;
1864 else if (DF_REF_FLAGS (the_ref) & DF_REF_IN_NOTE)
1865 ref_ptr = &insn_info->eq_uses;
3072d30e 1866 else
ddc2d0e3 1867 ref_ptr = &insn_info->uses;
ed6e85ae 1868 if (dump_file)
48e1416a 1869 fprintf (dump_file, "changing reg in insn %d\n",
1870 DF_REF_INSN_UID (the_ref));
1871
ddc2d0e3 1872 /* Stop if we find the current reference or where the reference
1873 needs to be. */
1874 while (*ref_ptr != the_ref && df_ref_compare (*ref_ptr, the_ref) < 0)
1875 ref_ptr = &DF_REF_NEXT_LOC (*ref_ptr);
1876 if (*ref_ptr != the_ref)
3072d30e 1877 {
ddc2d0e3 1878 /* The reference needs to be promoted up the list. */
1879 df_ref next = DF_REF_NEXT_LOC (the_ref);
1880 DF_REF_NEXT_LOC (the_ref) = *ref_ptr;
1881 *ref_ptr = the_ref;
1882 do
1883 ref_ptr = &DF_REF_NEXT_LOC (*ref_ptr);
1884 while (*ref_ptr != the_ref);
1885 *ref_ptr = next;
1886 }
1887 else if (DF_REF_NEXT_LOC (the_ref)
1888 && df_ref_compare (the_ref, DF_REF_NEXT_LOC (the_ref)) > 0)
1889 {
1890 /* The reference needs to be demoted down the list. */
1891 *ref_ptr = DF_REF_NEXT_LOC (the_ref);
1892 do
1893 ref_ptr = &DF_REF_NEXT_LOC (*ref_ptr);
1894 while (*ref_ptr && df_ref_compare (the_ref, *ref_ptr) > 0);
1895 DF_REF_NEXT_LOC (the_ref) = *ref_ptr;
1896 *ref_ptr = the_ref;
3072d30e 1897 }
3072d30e 1898
1899 the_ref = next_ref;
1900 }
1901 else
ed6e85ae 1902 the_ref = DF_REF_NEXT_REG (the_ref);
3072d30e 1903 }
1904}
1905
1906
91f5b5cb 1907/* Change the regno of register LOC to NEW_REGNO and update the df
1908 information accordingly. Refs that do not match LOC are not changed
1909 which means that artificial refs are not changed since they have no loc.
1910 This call is to support the SET_REGNO macro. */
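/* For illustration (a sketch; it assumes SET_REGNO expands to a call to
   this function, per the comment above): renaming a register rtx through
   SET_REGNO keeps all of the df reg chains consistent, whereas poking
   the regno field directly would not:

     rtx reg = regno_reg_rtx[old_regno];
     SET_REGNO (reg, new_regno);

   Every def, use and eq_use whose DF_REF_LOC is this exact rtx is moved
   from the old regno's chain to the new one and its containing block is
   marked dirty.  */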
3072d30e 1911
1912void
91f5b5cb 1913df_ref_change_reg_with_loc (rtx loc, unsigned int new_regno)
3072d30e 1914{
91f5b5cb 1915 unsigned int old_regno = REGNO (loc);
1916 if (old_regno == new_regno)
3072d30e 1917 return;
1918
91f5b5cb 1919 if (df)
1920 {
1921 df_grow_reg_info ();
1922
1923 df_ref_change_reg_with_loc_1 (DF_REG_DEF_GET (old_regno),
1924 DF_REG_DEF_GET (new_regno),
1925 new_regno, loc);
1926 df_ref_change_reg_with_loc_1 (DF_REG_USE_GET (old_regno),
1927 DF_REG_USE_GET (new_regno),
1928 new_regno, loc);
1929 df_ref_change_reg_with_loc_1 (DF_REG_EQ_USE_GET (old_regno),
1930 DF_REG_EQ_USE_GET (new_regno),
1931 new_regno, loc);
1932 }
1c0849e5 1933 set_mode_and_regno (loc, GET_MODE (loc), new_regno);
3072d30e 1934}
1935
1936
1937/* Delete the mw_hardregs that point into the eq_notes. */
1938
ddc2d0e3 1939static void
3072d30e 1940df_mw_hardreg_chain_delete_eq_uses (struct df_insn_info *insn_info)
1941{
ddc2d0e3 1942 struct df_mw_hardreg **mw_ptr = &insn_info->mw_hardregs;
48e1416a 1943 struct df_scan_problem_data *problem_data
3072d30e 1944 = (struct df_scan_problem_data *) df_scan->problem_data;
1945
ddc2d0e3 1946 while (*mw_ptr)
3072d30e 1947 {
ddc2d0e3 1948 df_mw_hardreg *mw = *mw_ptr;
1949 if (mw->flags & DF_REF_IN_NOTE)
3072d30e 1950 {
ddc2d0e3 1951 *mw_ptr = DF_MWS_NEXT (mw);
1952 pool_free (problem_data->mw_reg_pool, mw);
3072d30e 1953 }
1954 else
ddc2d0e3 1955 mw_ptr = &DF_MWS_NEXT (mw);
3072d30e 1956 }
3072d30e 1957}
1958
1959
1960/* Rescan only the REG_EQUIV/REG_EQUAL notes part of INSN. */
1961
1962void
e149ca56 1963df_notes_rescan (rtx_insn *insn)
3072d30e 1964{
1965 struct df_insn_info *insn_info;
1966 unsigned int uid = INSN_UID (insn);
1967
1968 if (!df)
1969 return;
1970
1971 /* The client has disabled rescanning and plans to do it itself. */
1972 if (df->changeable_flags & DF_NO_INSN_RESCAN)
1973 return;
1974
3e3659e7 1975 /* Do nothing if the insn hasn't been emitted yet. */
1976 if (!BLOCK_FOR_INSN (insn))
1977 return;
1978
3072d30e 1979 df_grow_bb_info (df_scan);
1980 df_grow_reg_info ();
1981
9af5ce0c 1982 insn_info = DF_INSN_UID_SAFE_GET (INSN_UID (insn));
3072d30e 1983
a3de79f9 1984 /* The client has deferred rescanning. */
3072d30e 1985 if (df->changeable_flags & DF_DEFER_INSN_RESCAN)
1986 {
1987 if (!insn_info)
1988 {
1989 insn_info = df_insn_create_insn_record (insn);
ddc2d0e3 1990 insn_info->defs = 0;
1991 insn_info->uses = 0;
1992 insn_info->eq_uses = 0;
1993 insn_info->mw_hardregs = 0;
3072d30e 1994 }
48e1416a 1995
4b5a4301 1996 bitmap_clear_bit (&df->insns_to_delete, uid);
3072d30e 1997 /* If the insn is set to be rescanned, it does not need to also
1998 be notes rescanned. */
4b5a4301 1999 if (!bitmap_bit_p (&df->insns_to_rescan, uid))
2000 bitmap_set_bit (&df->insns_to_notes_rescan, INSN_UID (insn));
3072d30e 2001 return;
2002 }
2003
4b5a4301 2004 bitmap_clear_bit (&df->insns_to_delete, uid);
2005 bitmap_clear_bit (&df->insns_to_notes_rescan, uid);
3072d30e 2006
2007 if (insn_info)
2008 {
2009 basic_block bb = BLOCK_FOR_INSN (insn);
2010 rtx note;
2011 struct df_collection_rec collection_rec;
ddc2d0e3 2012 unsigned int i;
3072d30e 2013
ddc2d0e3 2014 df_mw_hardreg_chain_delete_eq_uses (insn_info);
3072d30e 2015 df_ref_chain_delete (insn_info->eq_uses);
2016 insn_info->eq_uses = NULL;
2017
2018 /* Process REG_EQUIV/REG_EQUAL notes */
2019 for (note = REG_NOTES (insn); note;
2020 note = XEXP (note, 1))
2021 {
2022 switch (REG_NOTE_KIND (note))
2023 {
2024 case REG_EQUIV:
2025 case REG_EQUAL:
c989ecc1 2026 df_uses_record (&collection_rec,
3072d30e 2027 &XEXP (note, 0), DF_REF_REG_USE,
c989ecc1 2028 bb, insn_info, DF_REF_IN_NOTE);
3072d30e 2029 default:
2030 break;
2031 }
2032 }
2033
2034 /* Find some place to put any new mw_hardregs. */
2035 df_canonize_collection_rec (&collection_rec);
ddc2d0e3 2036 struct df_mw_hardreg **mw_ptr = &insn_info->mw_hardregs, *mw;
2037 FOR_EACH_VEC_ELT (collection_rec.mw_vec, i, mw)
3072d30e 2038 {
ddc2d0e3 2039 while (*mw_ptr && df_mw_compare (*mw_ptr, mw) < 0)
2040 mw_ptr = &DF_MWS_NEXT (*mw_ptr);
2041 DF_MWS_NEXT (mw) = *mw_ptr;
2042 *mw_ptr = mw;
2043 mw_ptr = &DF_MWS_NEXT (mw);
3072d30e 2044 }
d70aebca 2045 df_refs_add_to_chains (&collection_rec, bb, insn, copy_eq_uses);
3072d30e 2046 }
2047 else
2048 df_insn_rescan (insn);
2049
2050}
2051
2052\f
2053/*----------------------------------------------------------------------------
2054 Hard core instruction scanning code. No external interfaces here,
2055 just a lot of routines that look inside insns.
2056----------------------------------------------------------------------------*/
2057
2058
48e1416a 2059/* Return true if the contents of two df_ref's are identical.
3072d30e 2060   It ignores DF_REF_REG_MARKER and DF_REF_MW_HARDREG.  */
2061
2062static bool
ed6e85ae 2063df_ref_equal_p (df_ref ref1, df_ref ref2)
3072d30e 2064{
2065 if (!ref2)
2066 return false;
48e1416a 2067
ed6e85ae 2068 if (ref1 == ref2)
2069 return true;
2070
2071 if (DF_REF_CLASS (ref1) != DF_REF_CLASS (ref2)
2072 || DF_REF_REGNO (ref1) != DF_REF_REGNO (ref2)
2073 || DF_REF_REG (ref1) != DF_REF_REG (ref2)
2074 || DF_REF_TYPE (ref1) != DF_REF_TYPE (ref2)
48e1416a 2075 || ((DF_REF_FLAGS (ref1) & ~(DF_REF_REG_MARKER + DF_REF_MW_HARDREG))
ed6e85ae 2076 != (DF_REF_FLAGS (ref2) & ~(DF_REF_REG_MARKER + DF_REF_MW_HARDREG)))
2077 || DF_REF_BB (ref1) != DF_REF_BB (ref2)
2078 || DF_REF_INSN_INFO (ref1) != DF_REF_INSN_INFO (ref2))
2079 return false;
48e1416a 2080
ed6e85ae 2081 switch (DF_REF_CLASS (ref1))
2082 {
2083 case DF_REF_ARTIFICIAL:
2084 case DF_REF_BASE:
2085 return true;
30de5b55 2086
ed6e85ae 2087 case DF_REF_REGULAR:
2088 return DF_REF_LOC (ref1) == DF_REF_LOC (ref2);
30de5b55 2089
ed6e85ae 2090 default:
2091 gcc_unreachable ();
2092 }
2093 return false;
3072d30e 2094}
2095
2096
2097/* Compare REF1 and REF2 for sorting. This is only called from places
2098 where all of the refs are of the same type, in the same insn, and
2099 have the same bb. So these fields are not checked. */
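/* Concretely, the major-to-minor keys used below are: ref class, regno,
   ref type, then (when the REG rtx or the LOC differs) the creation
   order, then the flags (a ref that carries DF_REF_MW_HARDREG sorts
   before an otherwise identical one that does not), and finally
   DF_REF_ORDER as the tie breaker.  Sorting this way makes truly
   identical refs adjacent, which is what lets
   df_sort_and_compress_refs delete the duplicates.  */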
2100
2101static int
ddc2d0e3 2102df_ref_compare (df_ref ref1, df_ref ref2)
3072d30e 2103{
ed6e85ae 2104 if (DF_REF_CLASS (ref1) != DF_REF_CLASS (ref2))
2105 return (int)DF_REF_CLASS (ref1) - (int)DF_REF_CLASS (ref2);
2106
3072d30e 2107 if (DF_REF_REGNO (ref1) != DF_REF_REGNO (ref2))
2108 return (int)DF_REF_REGNO (ref1) - (int)DF_REF_REGNO (ref2);
48e1416a 2109
3072d30e 2110 if (DF_REF_TYPE (ref1) != DF_REF_TYPE (ref2))
2111 return (int)DF_REF_TYPE (ref1) - (int)DF_REF_TYPE (ref2);
2112
ed6e85ae 2113 if (DF_REF_REG (ref1) != DF_REF_REG (ref2))
2114 return (int)DF_REF_ORDER (ref1) - (int)DF_REF_ORDER (ref2);
2115
2116 /* Cannot look at the LOC field on artificial refs. */
2117 if (DF_REF_CLASS (ref1) != DF_REF_ARTIFICIAL
2118 && DF_REF_LOC (ref1) != DF_REF_LOC (ref2))
3072d30e 2119 return (int)DF_REF_ORDER (ref1) - (int)DF_REF_ORDER (ref2);
2120
2121 if (DF_REF_FLAGS (ref1) != DF_REF_FLAGS (ref2))
2122 {
 2123      /* If two refs are identical except that one of them is from a mw
 2124         and the other is not, the one with the mw must come first.  */
2126 if (DF_REF_FLAGS_IS_SET (ref1, DF_REF_MW_HARDREG) ==
2127 DF_REF_FLAGS_IS_SET (ref2, DF_REF_MW_HARDREG))
2128 return DF_REF_FLAGS (ref1) - DF_REF_FLAGS (ref2);
2129 else if (DF_REF_FLAGS_IS_SET (ref1, DF_REF_MW_HARDREG))
2130 return -1;
2131 else
2132 return 1;
2133 }
30de5b55 2134
7496f4ad 2135 return (int)DF_REF_ORDER (ref1) - (int)DF_REF_ORDER (ref2);
3072d30e 2136}
2137
ddc2d0e3 2138/* Like df_ref_compare, but compare two df_ref* pointers R1 and R2. */
2139
2140static int
2141df_ref_ptr_compare (const void *r1, const void *r2)
2142{
2143 return df_ref_compare (*(const df_ref *) r1, *(const df_ref *) r2);
2144}
2145
3072d30e 2146static void
d70aebca 2147df_swap_refs (vec<df_ref, va_heap> *ref_vec, int i, int j)
3072d30e 2148{
f1f41a6c 2149 df_ref tmp = (*ref_vec)[i];
2150 (*ref_vec)[i] = (*ref_vec)[j];
2151 (*ref_vec)[j] = tmp;
3072d30e 2152}
2153
2154/* Sort and compress a set of refs. */
2155
f7f05a07 2156static void
d70aebca 2157df_sort_and_compress_refs (vec<df_ref, va_heap> *ref_vec)
3072d30e 2158{
f7f05a07 2159 unsigned int count;
3072d30e 2160 unsigned int i;
2161 unsigned int dist = 0;
2162
f1f41a6c 2163 count = ref_vec->length ();
f7f05a07 2164
3072d30e 2165 /* If there are 1 or 0 elements, there is nothing to do. */
2166 if (count < 2)
f7f05a07 2167 return;
3072d30e 2168 else if (count == 2)
2169 {
f1f41a6c 2170 df_ref r0 = (*ref_vec)[0];
2171 df_ref r1 = (*ref_vec)[1];
ddc2d0e3 2172 if (df_ref_compare (r0, r1) > 0)
3072d30e 2173 df_swap_refs (ref_vec, 0, 1);
2174 }
2175 else
2176 {
2177 for (i = 0; i < count - 1; i++)
f7f05a07 2178 {
f1f41a6c 2179 df_ref r0 = (*ref_vec)[i];
2180 df_ref r1 = (*ref_vec)[i + 1];
ddc2d0e3 2181 if (df_ref_compare (r0, r1) >= 0)
f7f05a07 2182 break;
2183 }
3072d30e 2184      /* If the array is already strictly ordered, which is the most
 2185         common case for a large COUNT (it happens for CALL INSNs),
 2186         there is no need to sort and filter out the duplicates;
 2187         simply return.  Make sure DF_GET_ADD_REFS adds refs in
 2188         increasing order of DF_REF_COMPARE.  */
2191 if (i == count - 1)
f7f05a07 2192 return;
ddc2d0e3 2193 ref_vec->qsort (df_ref_ptr_compare);
3072d30e 2194 }
2195
2196 for (i=0; i<count-dist; i++)
2197 {
2198 /* Find the next ref that is not equal to the current ref. */
f7f05a07 2199 while (i + dist + 1 < count
f1f41a6c 2200 && df_ref_equal_p ((*ref_vec)[i],
2201 (*ref_vec)[i + dist + 1]))
3072d30e 2202 {
f1f41a6c 2203 df_free_ref ((*ref_vec)[i + dist + 1]);
3072d30e 2204 dist++;
2205 }
2206 /* Copy it down to the next position. */
f7f05a07 2207 if (dist && i + dist + 1 < count)
f1f41a6c 2208 (*ref_vec)[i + 1] = (*ref_vec)[i + dist + 1];
3072d30e 2209 }
2210
2211 count -= dist;
f1f41a6c 2212 ref_vec->truncate (count);
3072d30e 2213}
2214
2215
48e1416a 2216/* Return true if the contents of two df_mw_hardreg's are identical.  */
2218
2219static bool
2220df_mw_equal_p (struct df_mw_hardreg *mw1, struct df_mw_hardreg *mw2)
2221{
2222 if (!mw2)
2223 return false;
2224 return (mw1 == mw2) ||
2225 (mw1->mw_reg == mw2->mw_reg
2226 && mw1->type == mw2->type
2227 && mw1->flags == mw2->flags
2228 && mw1->start_regno == mw2->start_regno
2229 && mw1->end_regno == mw2->end_regno);
2230}
2231
2232
2233/* Compare MW1 and MW2 for sorting. */
2234
2235static int
ddc2d0e3 2236df_mw_compare (const df_mw_hardreg *mw1, const df_mw_hardreg *mw2)
3072d30e 2237{
3072d30e 2238 if (mw1->type != mw2->type)
2239 return mw1->type - mw2->type;
2240
2241 if (mw1->flags != mw2->flags)
2242 return mw1->flags - mw2->flags;
2243
2244 if (mw1->start_regno != mw2->start_regno)
2245 return mw1->start_regno - mw2->start_regno;
2246
2247 if (mw1->end_regno != mw2->end_regno)
2248 return mw1->end_regno - mw2->end_regno;
2249
2250 if (mw1->mw_reg != mw2->mw_reg)
2251 return mw1->mw_order - mw2->mw_order;
2252
2253 return 0;
2254}
2255
ddc2d0e3 2256/* Like df_mw_compare, but compare two df_mw_hardreg** pointers R1 and R2. */
2257
2258static int
2259df_mw_ptr_compare (const void *m1, const void *m2)
2260{
2261 return df_mw_compare (*(const df_mw_hardreg *const *) m1,
2262 *(const df_mw_hardreg *const *) m2);
2263}
3072d30e 2264
 2265/* Sort and compress a set of mws (multiword hardregs).  */
2266
f7f05a07 2267static void
d70aebca 2268df_sort_and_compress_mws (vec<df_mw_hardreg_ptr, va_heap> *mw_vec)
3072d30e 2269{
f7f05a07 2270 unsigned int count;
48e1416a 2271 struct df_scan_problem_data *problem_data
3072d30e 2272 = (struct df_scan_problem_data *) df_scan->problem_data;
2273 unsigned int i;
2274 unsigned int dist = 0;
3072d30e 2275
f1f41a6c 2276 count = mw_vec->length ();
3072d30e 2277 if (count < 2)
f7f05a07 2278 return;
3072d30e 2279 else if (count == 2)
2280 {
f1f41a6c 2281 struct df_mw_hardreg *m0 = (*mw_vec)[0];
2282 struct df_mw_hardreg *m1 = (*mw_vec)[1];
ddc2d0e3 2283 if (df_mw_compare (m0, m1) > 0)
3072d30e 2284 {
f1f41a6c 2285 struct df_mw_hardreg *tmp = (*mw_vec)[0];
2286 (*mw_vec)[0] = (*mw_vec)[1];
2287 (*mw_vec)[1] = tmp;
3072d30e 2288 }
2289 }
2290 else
ddc2d0e3 2291 mw_vec->qsort (df_mw_ptr_compare);
3072d30e 2292
2293 for (i=0; i<count-dist; i++)
2294 {
2295 /* Find the next ref that is not equal to the current ref. */
f7f05a07 2296 while (i + dist + 1 < count
f1f41a6c 2297 && df_mw_equal_p ((*mw_vec)[i], (*mw_vec)[i + dist + 1]))
3072d30e 2298 {
f7f05a07 2299 pool_free (problem_data->mw_reg_pool,
f1f41a6c 2300 (*mw_vec)[i + dist + 1]);
3072d30e 2301 dist++;
2302 }
2303 /* Copy it down to the next position. */
f7f05a07 2304 if (dist && i + dist + 1 < count)
f1f41a6c 2305 (*mw_vec)[i + 1] = (*mw_vec)[i + dist + 1];
3072d30e 2306 }
2307
2308 count -= dist;
f1f41a6c 2309 mw_vec->truncate (count);
3072d30e 2310}
2311
2312
2313/* Sort and remove duplicates from the COLLECTION_REC. */
2314
2315static void
2316df_canonize_collection_rec (struct df_collection_rec *collection_rec)
2317{
f7f05a07 2318 df_sort_and_compress_refs (&collection_rec->def_vec);
2319 df_sort_and_compress_refs (&collection_rec->use_vec);
2320 df_sort_and_compress_refs (&collection_rec->eq_use_vec);
2321 df_sort_and_compress_mws (&collection_rec->mw_vec);
3072d30e 2322}
2323
2324
2325/* Add the new df_ref to appropriate reg_info/ref_info chains. */
2326
2327static void
48e1416a 2328df_install_ref (df_ref this_ref,
2329 struct df_reg_info *reg_info,
3072d30e 2330 struct df_ref_info *ref_info,
2331 bool add_to_table)
2332{
2333 unsigned int regno = DF_REF_REGNO (this_ref);
2334 /* Add the ref to the reg_{def,use,eq_use} chain. */
ed6e85ae 2335 df_ref head = reg_info->reg_chain;
3072d30e 2336
2337 reg_info->reg_chain = this_ref;
2338 reg_info->n_refs++;
2339
2340 if (DF_REF_FLAGS_IS_SET (this_ref, DF_HARD_REG_LIVE))
2341 {
2342 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2343 df->hard_regs_live_count[regno]++;
2344 }
2345
0ea2d350 2346 gcc_checking_assert (DF_REF_NEXT_REG (this_ref) == NULL
2347 && DF_REF_PREV_REG (this_ref) == NULL);
3072d30e 2348
2349 DF_REF_NEXT_REG (this_ref) = head;
2350
2351 /* We cannot actually link to the head of the chain. */
2352 DF_REF_PREV_REG (this_ref) = NULL;
2353
2354 if (head)
2355 DF_REF_PREV_REG (head) = this_ref;
48e1416a 2356
3072d30e 2357 if (add_to_table)
2358 {
2359 gcc_assert (ref_info->ref_order != DF_REF_ORDER_NO_TABLE);
2360 df_check_and_grow_ref_info (ref_info, 1);
2361 DF_REF_ID (this_ref) = ref_info->table_size;
2362 /* Add the ref to the big array of defs. */
2363 ref_info->refs[ref_info->table_size] = this_ref;
2364 ref_info->table_size++;
48e1416a 2365 }
3072d30e 2366 else
2367 DF_REF_ID (this_ref) = -1;
48e1416a 2368
3072d30e 2369 ref_info->total_size++;
2370}
2371
2372
2373/* This function takes one of the groups of refs (defs, uses or
2374 eq_uses) and installs the entire group into the insn. It also adds
2375 each of these refs into the appropriate chains. */
2376
ddc2d0e3 2377static df_ref
3072d30e 2378df_install_refs (basic_block bb,
d70aebca 2379 const vec<df_ref, va_heap> *old_vec,
48e1416a 2380 struct df_reg_info **reg_info,
3072d30e 2381 struct df_ref_info *ref_info,
2382 bool is_notes)
2383{
d70aebca 2384 unsigned int count = old_vec->length ();
3072d30e 2385 if (count)
2386 {
3072d30e 2387 bool add_to_table;
f7f05a07 2388 df_ref this_ref;
2389 unsigned int ix;
3072d30e 2390
2391 switch (ref_info->ref_order)
2392 {
2393 case DF_REF_ORDER_UNORDERED_WITH_NOTES:
2394 case DF_REF_ORDER_BY_REG_WITH_NOTES:
2395 case DF_REF_ORDER_BY_INSN_WITH_NOTES:
2396 ref_info->ref_order = DF_REF_ORDER_UNORDERED_WITH_NOTES;
2397 add_to_table = true;
2398 break;
2399 case DF_REF_ORDER_UNORDERED:
2400 case DF_REF_ORDER_BY_REG:
2401 case DF_REF_ORDER_BY_INSN:
2402 ref_info->ref_order = DF_REF_ORDER_UNORDERED;
2403 add_to_table = !is_notes;
2404 break;
2405 default:
2406 add_to_table = false;
2407 break;
2408 }
2409
2410 /* Do not add if ref is not in the right blocks. */
2411 if (add_to_table && df->analyze_subset)
2412 add_to_table = bitmap_bit_p (df->blocks_to_analyze, bb->index);
2413
d70aebca 2414 FOR_EACH_VEC_ELT (*old_vec, ix, this_ref)
3072d30e 2415 {
ddc2d0e3 2416 DF_REF_NEXT_LOC (this_ref) = (ix + 1 < old_vec->length ()
2417 ? (*old_vec)[ix + 1]
2418 : NULL);
48e1416a 2419 df_install_ref (this_ref, reg_info[DF_REF_REGNO (this_ref)],
3072d30e 2420 ref_info, add_to_table);
2421 }
ddc2d0e3 2422 return (*old_vec)[0];
3072d30e 2423 }
2424 else
ddc2d0e3 2425 return 0;
3072d30e 2426}
2427
2428
 2429/* This function takes the mws and installs the entire group into the
 2430   insn.  */
2431
ddc2d0e3 2432static struct df_mw_hardreg *
d70aebca 2433df_install_mws (const vec<df_mw_hardreg_ptr, va_heap> *old_vec)
3072d30e 2434{
d70aebca 2435 unsigned int count = old_vec->length ();
3072d30e 2436 if (count)
2437 {
ddc2d0e3 2438 for (unsigned int i = 0; i < count - 1; i++)
2439 DF_MWS_NEXT ((*old_vec)[i]) = (*old_vec)[i + 1];
2440 DF_MWS_NEXT ((*old_vec)[count - 1]) = 0;
2441 return (*old_vec)[0];
3072d30e 2442 }
2443 else
ddc2d0e3 2444 return 0;
3072d30e 2445}
2446
2447
2448/* Add a chain of df_refs to appropriate ref chain/reg_info/ref_info
2449 chains and update other necessary information. */
2450
2451static void
48e1416a 2452df_refs_add_to_chains (struct df_collection_rec *collection_rec,
e149ca56 2453 basic_block bb, rtx_insn *insn, unsigned int flags)
3072d30e 2454{
2455 if (insn)
2456 {
158b6cc9 2457 struct df_insn_info *insn_rec = DF_INSN_INFO_GET (insn);
3072d30e 2458 /* If there is a vector in the collection rec, add it to the
2459 insn. A null rec is a signal that the caller will handle the
2460 chain specially. */
d70aebca 2461 if (flags & copy_defs)
3072d30e 2462 {
ddc2d0e3 2463 gcc_checking_assert (!insn_rec->defs);
48e1416a 2464 insn_rec->defs
d70aebca 2465 = df_install_refs (bb, &collection_rec->def_vec,
3072d30e 2466 df->def_regs,
2467 &df->def_info, false);
2468 }
d70aebca 2469 if (flags & copy_uses)
3072d30e 2470 {
ddc2d0e3 2471 gcc_checking_assert (!insn_rec->uses);
48e1416a 2472 insn_rec->uses
d70aebca 2473 = df_install_refs (bb, &collection_rec->use_vec,
3072d30e 2474 df->use_regs,
2475 &df->use_info, false);
2476 }
d70aebca 2477 if (flags & copy_eq_uses)
3072d30e 2478 {
ddc2d0e3 2479 gcc_checking_assert (!insn_rec->eq_uses);
48e1416a 2480 insn_rec->eq_uses
d70aebca 2481 = df_install_refs (bb, &collection_rec->eq_use_vec,
3072d30e 2482 df->eq_use_regs,
2483 &df->use_info, true);
2484 }
d70aebca 2485 if (flags & copy_mw)
3072d30e 2486 {
ddc2d0e3 2487 gcc_checking_assert (!insn_rec->mw_hardregs);
48e1416a 2488 insn_rec->mw_hardregs
d70aebca 2489 = df_install_mws (&collection_rec->mw_vec);
3072d30e 2490 }
2491 }
2492 else
2493 {
2494 struct df_scan_bb_info *bb_info = df_scan_get_bb_info (bb->index);
2495
ddc2d0e3 2496 gcc_checking_assert (!bb_info->artificial_defs);
48e1416a 2497 bb_info->artificial_defs
d70aebca 2498 = df_install_refs (bb, &collection_rec->def_vec,
3072d30e 2499 df->def_regs,
2500 &df->def_info, false);
ddc2d0e3 2501 gcc_checking_assert (!bb_info->artificial_uses);
48e1416a 2502 bb_info->artificial_uses
d70aebca 2503 = df_install_refs (bb, &collection_rec->use_vec,
3072d30e 2504 df->use_regs,
2505 &df->use_info, false);
2506 }
2507}
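/* A sketch of how a client walks the chains built by
   df_refs_add_to_chains (using the DF_INSN_UID_DEFS and DF_REF_NEXT_LOC
   accessors this file already relies on):

     df_ref def;
     for (def = DF_INSN_UID_DEFS (INSN_UID (insn)); def;
          def = DF_REF_NEXT_LOC (def))
       if (DF_REF_REGNO (def) == regno)
         ...

   The refs are threaded through DF_REF_NEXT_LOC in the canonical order
   established by df_canonize_collection_rec, and each ref is also on
   its register's reg_chain via df_install_ref.  */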
e011eba9 2508
e011eba9 2509
c989ecc1 2510/* Allocate a ref and initialize its fields. */
3072d30e 2511
48e1416a 2512static df_ref
2513df_ref_create_structure (enum df_ref_class cl,
ed6e85ae 2514 struct df_collection_rec *collection_rec,
48e1416a 2515 rtx reg, rtx *loc,
158b6cc9 2516 basic_block bb, struct df_insn_info *info,
48e1416a 2517 enum df_ref_type ref_type,
c989ecc1 2518 int ref_flags)
3072d30e 2519{
ed6e85ae 2520 df_ref this_ref = NULL;
3072d30e 2521 int regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
2522 struct df_scan_problem_data *problem_data
2523 = (struct df_scan_problem_data *) df_scan->problem_data;
2524
ed6e85ae 2525 switch (cl)
30de5b55 2526 {
ed6e85ae 2527 case DF_REF_BASE:
2528 this_ref = (df_ref) pool_alloc (problem_data->ref_base_pool);
0ea2d350 2529 gcc_checking_assert (loc == NULL);
ed6e85ae 2530 break;
2531
2532 case DF_REF_ARTIFICIAL:
2533 this_ref = (df_ref) pool_alloc (problem_data->ref_artificial_pool);
2534 this_ref->artificial_ref.bb = bb;
0ea2d350 2535 gcc_checking_assert (loc == NULL);
ed6e85ae 2536 break;
2537
2538 case DF_REF_REGULAR:
2539 this_ref = (df_ref) pool_alloc (problem_data->ref_regular_pool);
2540 this_ref->regular_ref.loc = loc;
0ea2d350 2541 gcc_checking_assert (loc);
ed6e85ae 2542 break;
30de5b55 2543 }
ed6e85ae 2544
2545 DF_REF_CLASS (this_ref) = cl;
3072d30e 2546 DF_REF_ID (this_ref) = -1;
2547 DF_REF_REG (this_ref) = reg;
2548 DF_REF_REGNO (this_ref) = regno;
ed6e85ae 2549 DF_REF_TYPE (this_ref) = ref_type;
158b6cc9 2550 DF_REF_INSN_INFO (this_ref) = info;
3072d30e 2551 DF_REF_CHAIN (this_ref) = NULL;
3072d30e 2552 DF_REF_FLAGS (this_ref) = ref_flags;
3072d30e 2553 DF_REF_NEXT_REG (this_ref) = NULL;
2554 DF_REF_PREV_REG (this_ref) = NULL;
2555 DF_REF_ORDER (this_ref) = df->ref_order++;
2556
 2557      /* We need to clear this bit because fwprop, and in the future
 2558         possibly other optimizations, sometimes create new refs using
 2559         old refs as the model.  */
2560 DF_REF_FLAGS_CLEAR (this_ref, DF_HARD_REG_LIVE);
2561
2562 /* See if this ref needs to have DF_HARD_REG_LIVE bit set. */
5df3e7ea 2563 if (regno < FIRST_PSEUDO_REGISTER
2564 && !DF_REF_IS_ARTIFICIAL (this_ref)
2565 && !DEBUG_INSN_P (DF_REF_INSN (this_ref)))
3072d30e 2566 {
ed6e85ae 2567 if (DF_REF_REG_DEF_P (this_ref))
3072d30e 2568 {
2569 if (!DF_REF_FLAGS_IS_SET (this_ref, DF_REF_MAY_CLOBBER))
2570 DF_REF_FLAGS_SET (this_ref, DF_HARD_REG_LIVE);
2571 }
2572 else if (!(TEST_HARD_REG_BIT (elim_reg_set, regno)
2573 && (regno == FRAME_POINTER_REGNUM
2574 || regno == ARG_POINTER_REGNUM)))
2575 DF_REF_FLAGS_SET (this_ref, DF_HARD_REG_LIVE);
2576 }
2577
2578 if (collection_rec)
2579 {
ed6e85ae 2580 if (DF_REF_REG_DEF_P (this_ref))
f1f41a6c 2581 collection_rec->def_vec.safe_push (this_ref);
3072d30e 2582 else if (DF_REF_FLAGS (this_ref) & DF_REF_IN_NOTE)
f1f41a6c 2583 collection_rec->eq_use_vec.safe_push (this_ref);
3072d30e 2584 else
f1f41a6c 2585 collection_rec->use_vec.safe_push (this_ref);
e011eba9 2586 }
4ffe0526 2587 else
2588 df_install_ref_incremental (this_ref);
3072d30e 2589
e011eba9 2590 return this_ref;
2591}
2592
2593
2594/* Create new references of type DF_REF_TYPE for each part of register REG
c989ecc1 2595 at address LOC within INSN of BB. */
30de5b55 2596
e011eba9 2597
2598static void
48e1416a 2599df_ref_record (enum df_ref_class cl,
ed6e85ae 2600 struct df_collection_rec *collection_rec,
48e1416a 2601 rtx reg, rtx *loc,
158b6cc9 2602 basic_block bb, struct df_insn_info *insn_info,
48e1416a 2603 enum df_ref_type ref_type,
c989ecc1 2604 int ref_flags)
e011eba9 2605{
3e6933a8 2606 unsigned int regno;
e011eba9 2607
0ea2d350 2608 gcc_checking_assert (REG_P (reg) || GET_CODE (reg) == SUBREG);
e011eba9 2609
e011eba9 2610 regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
2611 if (regno < FIRST_PSEUDO_REGISTER)
2612 {
3e6933a8 2613 struct df_mw_hardreg *hardreg = NULL;
2614 struct df_scan_problem_data *problem_data
3072d30e 2615 = (struct df_scan_problem_data *) df_scan->problem_data;
2616 unsigned int i;
2617 unsigned int endregno;
ed6e85ae 2618 df_ref ref;
e011eba9 2619
e011eba9 2620 if (GET_CODE (reg) == SUBREG)
fe2ebfc8 2621 {
2622 regno += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)),
2623 SUBREG_BYTE (reg), GET_MODE (reg));
a2c6f0b7 2624 endregno = regno + subreg_nregs (reg);
fe2ebfc8 2625 }
2626 else
788bed51 2627 endregno = END_REGNO (reg);
e011eba9 2628
3072d30e 2629 /* If this is a multiword hardreg, we create some extra
2630 datastructures that will enable us to easily build REG_DEAD
2631 and REG_UNUSED notes. */
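      /* For example (the register numbers are illustrative only): a write
         to a DImode value living in the hard register pair 4/5 creates one
         df_mw_hardreg with start_regno 4 and end_regno 5, plus one
         ordinary df_ref for hard register 4 and one for hard register 5
         in the loop below.  */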
4ffe0526 2632 if (collection_rec
2633 && (endregno != regno + 1) && insn_info)
3e6933a8 2634 {
48e1416a 2635 /* Sets to a subreg of a multiword register are partial.
3e6933a8 2636 Sets to a non-subreg of a multiword register are not. */
05beda33 2637 if (GET_CODE (reg) == SUBREG)
3e6933a8 2638 ref_flags |= DF_REF_PARTIAL;
2639 ref_flags |= DF_REF_MW_HARDREG;
3072d30e 2640
364c0c59 2641 hardreg = (struct df_mw_hardreg *) pool_alloc (problem_data->mw_reg_pool);
3e6933a8 2642 hardreg->type = ref_type;
2643 hardreg->flags = ref_flags;
2644 hardreg->mw_reg = reg;
3072d30e 2645 hardreg->start_regno = regno;
2646 hardreg->end_regno = endregno - 1;
2647 hardreg->mw_order = df->ref_order++;
f1f41a6c 2648 collection_rec->mw_vec.safe_push (hardreg);
3e6933a8 2649 }
2650
e011eba9 2651 for (i = regno; i < endregno; i++)
2652 {
48e1416a 2653 ref = df_ref_create_structure (cl, collection_rec, regno_reg_rtx[i], loc,
c989ecc1 2654 bb, insn_info, ref_type, ref_flags);
3e6933a8 2655
3072d30e 2656 gcc_assert (ORIGINAL_REGNO (DF_REF_REG (ref)) == i);
e011eba9 2657 }
2658 }
2659 else
2660 {
48e1416a 2661 df_ref_create_structure (cl, collection_rec, reg, loc, bb, insn_info,
c989ecc1 2662 ref_type, ref_flags);
e011eba9 2663 }
2664}
2665
2666
2667/* A set to a non-paradoxical SUBREG for which the number of word_mode units
2668 covered by the outer mode is smaller than that covered by the inner mode,
2669 is a read-modify-write operation.
2670 This function returns true iff the SUBREG X is such a SUBREG. */
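/* For example, assuming a 32-bit target where word_mode is SImode and
   REGMODE_NATURAL_SIZE of the inner DImode register is 4: writing to
   (subreg:SI (reg:DI 100) 0) only replaces the low word of pseudo 100,
   so the store is really a read-modify-write of the DImode register and
   df_read_modify_subreg_p returns true for that SUBREG.  A paradoxical
   or same-size SUBREG returns false.  */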
2671
2672bool
2673df_read_modify_subreg_p (rtx x)
2674{
2675 unsigned int isize, osize;
2676 if (GET_CODE (x) != SUBREG)
2677 return false;
2678 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
2679 osize = GET_MODE_SIZE (GET_MODE (x));
c9a4672f 2680 return isize > osize
2681 && isize > REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
e011eba9 2682}
2683
2684
f7583c76 2685/* Process all the registers defined in the rtx pointed by LOC.
2686 Autoincrement/decrement definitions will be picked up by df_uses_record.
2687 Any change here has to be matched in df_find_hard_reg_defs_1. */
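/* For instance (an illustrative pseudo number, little-endian target):
   for
     (set (strict_low_part (subreg:QI (reg:SI 100) 0)) (reg:QI 101))
   the destination walk below strips the STRICT_LOW_PART, so the def
   recorded for pseudo 100 carries DF_REF_READ_WRITE, DF_REF_PARTIAL and
   DF_REF_STRICT_LOW_PART, plus DF_REF_SUBREG once the SUBREG case is
   reached.  */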
e011eba9 2688
2689static void
3072d30e 2690df_def_record_1 (struct df_collection_rec *collection_rec,
f7583c76 2691 rtx *loc, basic_block bb, struct df_insn_info *insn_info,
b9c74b4d 2692 int flags)
e011eba9 2693{
f7583c76 2694 rtx dst = *loc;
e011eba9 2695
35792caf 2696 /* It is legal to have a set destination be a parallel. */
2697 if (GET_CODE (dst) == PARALLEL)
e011eba9 2698 {
2699 int i;
e011eba9 2700 for (i = XVECLEN (dst, 0) - 1; i >= 0; i--)
2701 {
2702 rtx temp = XVECEXP (dst, 0, i);
f7583c76 2703 gcc_assert (GET_CODE (temp) == EXPR_LIST);
2704 df_def_record_1 (collection_rec, &XEXP (temp, 0),
2705 bb, insn_info, flags);
e011eba9 2706 }
2707 return;
2708 }
2709
30de5b55 2710 if (GET_CODE (dst) == STRICT_LOW_PART)
e011eba9 2711 {
30de5b55 2712 flags |= DF_REF_READ_WRITE | DF_REF_PARTIAL | DF_REF_STRICT_LOW_PART;
2713
2714 loc = &XEXP (dst, 0);
2715 dst = *loc;
2716 }
2717
2718 if (GET_CODE (dst) == ZERO_EXTRACT)
2719 {
2720 flags |= DF_REF_READ_WRITE | DF_REF_PARTIAL | DF_REF_ZERO_EXTRACT;
48e1416a 2721
e011eba9 2722 loc = &XEXP (dst, 0);
2723 dst = *loc;
e011eba9 2724 }
2725
dea7b504 2726 /* At this point if we do not have a reg or a subreg, just return. */
2727 if (REG_P (dst))
2728 {
c989ecc1 2729 df_ref_record (DF_REF_REGULAR, collection_rec,
2730 dst, loc, bb, insn_info, DF_REF_REG_DEF, flags);
dea7b504 2731
2732 /* We want to keep sp alive everywhere - by making all
2733 writes to sp also use of sp. */
2734 if (REGNO (dst) == STACK_POINTER_REGNUM)
ed6e85ae 2735 df_ref_record (DF_REF_BASE, collection_rec,
c989ecc1 2736 dst, NULL, bb, insn_info, DF_REF_REG_USE, flags);
dea7b504 2737 }
2738 else if (GET_CODE (dst) == SUBREG && REG_P (SUBREG_REG (dst)))
2739 {
2740 if (df_read_modify_subreg_p (dst))
2741 flags |= DF_REF_READ_WRITE | DF_REF_PARTIAL;
c9a4672f 2742
dea7b504 2743 flags |= DF_REF_SUBREG;
37ad3318 2744
c989ecc1 2745 df_ref_record (DF_REF_REGULAR, collection_rec,
2746 dst, loc, bb, insn_info, DF_REF_REG_DEF, flags);
dea7b504 2747 }
e011eba9 2748}
2749
2750
f7583c76 2751/* Process all the registers defined in the pattern rtx, X. Any change
2752 here has to be matched in df_find_hard_reg_defs. */
e011eba9 2753
2754static void
48e1416a 2755df_defs_record (struct df_collection_rec *collection_rec,
158b6cc9 2756 rtx x, basic_block bb, struct df_insn_info *insn_info,
b9c74b4d 2757 int flags)
e011eba9 2758{
2759 RTX_CODE code = GET_CODE (x);
f7583c76 2760 int i;
e011eba9 2761
f7583c76 2762 switch (code)
e011eba9 2763 {
f7583c76 2764 case SET:
2765 df_def_record_1 (collection_rec, &SET_DEST (x), bb, insn_info, flags);
2766 break;
2767
2768 case CLOBBER:
2769 flags |= DF_REF_MUST_CLOBBER;
2770 df_def_record_1 (collection_rec, &XEXP (x, 0), bb, insn_info, flags);
2771 break;
2772
2773 case COND_EXEC:
48e1416a 2774 df_defs_record (collection_rec, COND_EXEC_CODE (x),
158b6cc9 2775 bb, insn_info, DF_REF_CONDITIONAL);
f7583c76 2776 break;
2777
2778 case PARALLEL:
2779 for (i = 0; i < XVECLEN (x, 0); i++)
2780 df_defs_record (collection_rec, XVECEXP (x, 0, i),
2781 bb, insn_info, flags);
2782 break;
2783 default:
2784 /* No DEFs to record in other cases */
2785 break;
e011eba9 2786 }
f7583c76 2787}
2788
2789/* Set bits in *DEFS for hard registers found in the rtx DST, which is the
2790 destination of a set or clobber. This has to match the logic in
 2791   df_def_record_1.  */
2792
2793static void
2794df_find_hard_reg_defs_1 (rtx dst, HARD_REG_SET *defs)
2795{
2796 /* It is legal to have a set destination be a parallel. */
2797 if (GET_CODE (dst) == PARALLEL)
e011eba9 2798 {
2799 int i;
f7583c76 2800 for (i = XVECLEN (dst, 0) - 1; i >= 0; i--)
2801 {
2802 rtx temp = XVECEXP (dst, 0, i);
2803 gcc_assert (GET_CODE (temp) == EXPR_LIST);
2804 df_find_hard_reg_defs_1 (XEXP (temp, 0), defs);
2805 }
2806 return;
2807 }
2808
2809 if (GET_CODE (dst) == STRICT_LOW_PART)
2810 dst = XEXP (dst, 0);
2811
2812 if (GET_CODE (dst) == ZERO_EXTRACT)
2813 dst = XEXP (dst, 0);
2814
2815 /* At this point if we do not have a reg or a subreg, just return. */
2816 if (REG_P (dst) && HARD_REGISTER_P (dst))
2817 SET_HARD_REG_BIT (*defs, REGNO (dst));
2818 else if (GET_CODE (dst) == SUBREG
2819 && REG_P (SUBREG_REG (dst)) && HARD_REGISTER_P (dst))
2820 SET_HARD_REG_BIT (*defs, REGNO (SUBREG_REG (dst)));
2821}
2822
2823/* Set bits in *DEFS for hard registers defined in the pattern X. This
2824 has to match the logic in df_defs_record. */
e011eba9 2825
f7583c76 2826static void
2827df_find_hard_reg_defs (rtx x, HARD_REG_SET *defs)
2828{
2829 RTX_CODE code = GET_CODE (x);
2830 int i;
2831
2832 switch (code)
2833 {
2834 case SET:
2835 df_find_hard_reg_defs_1 (SET_DEST (x), defs);
2836 break;
2837
2838 case CLOBBER:
2839 df_find_hard_reg_defs_1 (XEXP (x, 0), defs);
2840 break;
2841
2842 case COND_EXEC:
2843 df_find_hard_reg_defs (COND_EXEC_CODE (x), defs);
2844 break;
2845
2846 case PARALLEL:
2847 for (i = 0; i < XVECLEN (x, 0); i++)
2848 df_find_hard_reg_defs (XVECEXP (x, 0, i), defs);
2849 break;
2850 default:
2851 /* No DEFs to record in other cases */
2852 break;
e011eba9 2853 }
2854}
2855
2856
c989ecc1 2857/* Process all the registers used in the rtx at address LOC. */
e011eba9 2858
2859static void
c989ecc1 2860df_uses_record (struct df_collection_rec *collection_rec,
3072d30e 2861 rtx *loc, enum df_ref_type ref_type,
158b6cc9 2862 basic_block bb, struct df_insn_info *insn_info,
c989ecc1 2863 int flags)
e011eba9 2864{
2865 RTX_CODE code;
2866 rtx x;
3072d30e 2867
e011eba9 2868 retry:
2869 x = *loc;
2870 if (!x)
2871 return;
2872 code = GET_CODE (x);
2873 switch (code)
2874 {
2875 case LABEL_REF:
2876 case SYMBOL_REF:
e011eba9 2877 case CONST:
0349edce 2878 CASE_CONST_ANY:
e011eba9 2879 case PC:
2880 case CC0:
2881 case ADDR_VEC:
2882 case ADDR_DIFF_VEC:
2883 return;
2884
2885 case CLOBBER:
2886 /* If we are clobbering a MEM, mark any registers inside the address
2887 as being used. */
2888 if (MEM_P (XEXP (x, 0)))
c989ecc1 2889 df_uses_record (collection_rec,
3072d30e 2890 &XEXP (XEXP (x, 0), 0),
158b6cc9 2891 DF_REF_REG_MEM_STORE,
2892 bb, insn_info,
c989ecc1 2893 flags);
e011eba9 2894
2895 /* If we're clobbering a REG then we have a def so ignore. */
2896 return;
2897
2898 case MEM:
c989ecc1 2899 df_uses_record (collection_rec,
48e1416a 2900 &XEXP (x, 0), DF_REF_REG_MEM_LOAD,
c989ecc1 2901 bb, insn_info, flags & DF_REF_IN_NOTE);
e011eba9 2902 return;
2903
2904 case SUBREG:
2905 /* While we're here, optimize this case. */
3e6933a8 2906 flags |= DF_REF_PARTIAL;
e011eba9 2907 /* In case the SUBREG is not of a REG, do not optimize. */
2908 if (!REG_P (SUBREG_REG (x)))
2909 {
2910 loc = &SUBREG_REG (x);
c989ecc1 2911 df_uses_record (collection_rec, loc, ref_type, bb, insn_info, flags);
e011eba9 2912 return;
2913 }
2914 /* ... Fall through ... */
2915
2916 case REG:
c989ecc1 2917 df_ref_record (DF_REF_REGULAR, collection_rec,
158b6cc9 2918 x, loc, bb, insn_info,
c989ecc1 2919 ref_type, flags);
e011eba9 2920 return;
2921
30de5b55 2922 case SIGN_EXTRACT:
2923 case ZERO_EXTRACT:
2924 {
c989ecc1 2925 df_uses_record (collection_rec,
2926 &XEXP (x, 1), ref_type, bb, insn_info, flags);
2927 df_uses_record (collection_rec,
2928 &XEXP (x, 2), ref_type, bb, insn_info, flags);
2929
2930 /* If the parameters to the zero or sign extract are
2931 constants, strip them off and recurse, otherwise there is
2932 no information that we can gain from this operation. */
2933 if (code == ZERO_EXTRACT)
2934 flags |= DF_REF_ZERO_EXTRACT;
2935 else
2936 flags |= DF_REF_SIGN_EXTRACT;
2937
2938 df_uses_record (collection_rec,
2939 &XEXP (x, 0), ref_type, bb, insn_info, flags);
2940 return;
30de5b55 2941 }
2942 break;
2943
e011eba9 2944 case SET:
2945 {
2946 rtx dst = SET_DEST (x);
2947 gcc_assert (!(flags & DF_REF_IN_NOTE));
c989ecc1 2948 df_uses_record (collection_rec,
2949 &SET_SRC (x), DF_REF_REG_USE, bb, insn_info, flags);
e011eba9 2950
2951 switch (GET_CODE (dst))
2952 {
2953 case SUBREG:
2954 if (df_read_modify_subreg_p (dst))
2955 {
c989ecc1 2956 df_uses_record (collection_rec, &SUBREG_REG (dst),
48e1416a 2957 DF_REF_REG_USE, bb, insn_info,
c989ecc1 2958 flags | DF_REF_READ_WRITE | DF_REF_SUBREG);
e011eba9 2959 break;
2960 }
2961 /* Fall through. */
2962 case REG:
2963 case PARALLEL:
2964 case SCRATCH:
2965 case PC:
2966 case CC0:
2967 break;
2968 case MEM:
c989ecc1 2969 df_uses_record (collection_rec, &XEXP (dst, 0),
2970 DF_REF_REG_MEM_STORE, bb, insn_info, flags);
e011eba9 2971 break;
2972 case STRICT_LOW_PART:
2973 {
2974 rtx *temp = &XEXP (dst, 0);
2975 /* A strict_low_part uses the whole REG and not just the
2976 SUBREG. */
2977 dst = XEXP (dst, 0);
c989ecc1 2978 df_uses_record (collection_rec,
48e1416a 2979 (GET_CODE (dst) == SUBREG) ? &SUBREG_REG (dst) : temp,
158b6cc9 2980 DF_REF_REG_USE, bb, insn_info,
c989ecc1 2981 DF_REF_READ_WRITE | DF_REF_STRICT_LOW_PART);
e011eba9 2982 }
2983 break;
2984 case ZERO_EXTRACT:
30de5b55 2985 {
c989ecc1 2986 df_uses_record (collection_rec, &XEXP (dst, 1),
2987 DF_REF_REG_USE, bb, insn_info, flags);
2988 df_uses_record (collection_rec, &XEXP (dst, 2),
2989 DF_REF_REG_USE, bb, insn_info, flags);
 2990            if (GET_CODE (XEXP (dst, 0)) == MEM)
2991 df_uses_record (collection_rec, &XEXP (dst, 0),
2992 DF_REF_REG_USE, bb, insn_info,
2993 flags);
2994 else
2995 df_uses_record (collection_rec, &XEXP (dst, 0),
2996 DF_REF_REG_USE, bb, insn_info,
2997 DF_REF_READ_WRITE | DF_REF_ZERO_EXTRACT);
30de5b55 2998 }
e011eba9 2999 break;
30de5b55 3000
e011eba9 3001 default:
3002 gcc_unreachable ();
3003 }
3004 return;
3005 }
3006
3007 case RETURN:
9cb2517e 3008 case SIMPLE_RETURN:
e011eba9 3009 break;
3010
3011 case ASM_OPERANDS:
3012 case UNSPEC_VOLATILE:
3013 case TRAP_IF:
3014 case ASM_INPUT:
3015 {
3016 /* Traditional and volatile asm instructions must be
3017 considered to use and clobber all hard registers, all
3018 pseudo-registers and all of memory. So must TRAP_IF and
3019 UNSPEC_VOLATILE operations.
3020
3021 Consider for instance a volatile asm that changes the fpu
3022 rounding mode. An insn should not be moved across this
3023 even if it only uses pseudo-regs because it might give an
3024 incorrectly rounded result.
3025
3026 However, flow.c's liveness computation did *not* do this,
3027 giving the reasoning as " ?!? Unfortunately, marking all
3028 hard registers as live causes massive problems for the
3029 register allocator and marking all pseudos as live creates
3030 mountains of uninitialized variable warnings."
3031
3032 In order to maintain the status quo with regard to liveness
3033 and uses, we do what flow.c did and just mark any regs we
3072d30e 3034         can find in ASM_OPERANDS as used.  Asm insns are scanned
 3035         separately and regs_asm_clobbered is filled out there.
e011eba9 3036
3037 For all ASM_OPERANDS, we must traverse the vector of input
 3038         operands.  We cannot just fall through here since then we
 3039         would be confused by the ASM_INPUT rtx inside ASM_OPERANDS,
 3040         which does not indicate a traditional asm unlike its normal
 3041         usage.  */
3042 if (code == ASM_OPERANDS)
3043 {
3044 int j;
3045
3046 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
c989ecc1 3047 df_uses_record (collection_rec, &ASM_OPERANDS_INPUT (x, j),
3048 DF_REF_REG_USE, bb, insn_info, flags);
e011eba9 3049 return;
3050 }
3051 break;
3052 }
3053
9845d120 3054 case VAR_LOCATION:
c989ecc1 3055 df_uses_record (collection_rec,
9845d120 3056 &PAT_VAR_LOCATION_LOC (x),
c989ecc1 3057 DF_REF_REG_USE, bb, insn_info, flags);
9845d120 3058 return;
3059
e011eba9 3060 case PRE_DEC:
3061 case POST_DEC:
3062 case PRE_INC:
3063 case POST_INC:
3064 case PRE_MODIFY:
3065 case POST_MODIFY:
9845d120 3066 gcc_assert (!DEBUG_INSN_P (insn_info->insn));
e011eba9 3067 /* Catch the def of the register being modified. */
c989ecc1 3068 df_ref_record (DF_REF_REGULAR, collection_rec, XEXP (x, 0), &XEXP (x, 0),
48e1416a 3069 bb, insn_info,
7577c7f7 3070 DF_REF_REG_DEF,
c989ecc1 3071 flags | DF_REF_READ_WRITE | DF_REF_PRE_POST_MODIFY);
e011eba9 3072
3073 /* ... Fall through to handle uses ... */
3074
3075 default:
3076 break;
3077 }
3078
3079 /* Recursively scan the operands of this expression. */
3080 {
3081 const char *fmt = GET_RTX_FORMAT (code);
3082 int i;
3083
3084 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3085 {
3086 if (fmt[i] == 'e')
3087 {
3088 /* Tail recursive case: save a function call level. */
3089 if (i == 0)
3090 {
3091 loc = &XEXP (x, 0);
3092 goto retry;
3093 }
c989ecc1 3094 df_uses_record (collection_rec, &XEXP (x, i), ref_type,
3095 bb, insn_info, flags);
e011eba9 3096 }
3097 else if (fmt[i] == 'E')
3098 {
3099 int j;
3100 for (j = 0; j < XVECLEN (x, i); j++)
c989ecc1 3101 df_uses_record (collection_rec,
48e1416a 3102 &XVECEXP (x, i, j), ref_type,
c989ecc1 3103 bb, insn_info, flags);
e011eba9 3104 }
3105 }
3106 }
e011eba9 3107
3072d30e 3108 return;
e011eba9 3109}
3110
3111
3072d30e 3112/* For all DF_REF_CONDITIONAL defs, add a corresponding use.  */
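/* The reason: a conditionally executed set such as
     (cond_exec (ne (reg:CC 24) (const_int 0)) (set (reg:SI 100) ...))
   may leave pseudo 100 untouched, so its previous value can flow
   through the insn; recording a matching use keeps that value live.
   (The register numbers above are illustrative only.)  */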
e011eba9 3113
3072d30e 3114static void
3115df_get_conditional_uses (struct df_collection_rec *collection_rec)
e011eba9 3116{
f7f05a07 3117 unsigned int ix;
3118 df_ref ref;
3119
f1f41a6c 3120 FOR_EACH_VEC_ELT (collection_rec->def_vec, ix, ref)
3072d30e 3121 {
3072d30e 3122 if (DF_REF_FLAGS_IS_SET (ref, DF_REF_CONDITIONAL))
3123 {
ed6e85ae 3124 df_ref use;
30de5b55 3125
ed6e85ae 3126 use = df_ref_create_structure (DF_REF_CLASS (ref), collection_rec, DF_REF_REG (ref),
30de5b55 3127 DF_REF_LOC (ref), DF_REF_BB (ref),
158b6cc9 3128 DF_REF_INSN_INFO (ref), DF_REF_REG_USE,
c989ecc1 3129 DF_REF_FLAGS (ref) & ~DF_REF_CONDITIONAL);
3072d30e 3130 DF_REF_REGNO (use) = DF_REF_REGNO (ref);
3131 }
3132 }
e011eba9 3133}
3134
3135
f7583c76 3136/* Get call's extra defs and uses (track caller-saved registers). */
e011eba9 3137
3138static void
f7583c76 3139df_get_call_refs (struct df_collection_rec *collection_rec,
48e1416a 3140 basic_block bb,
158b6cc9 3141 struct df_insn_info *insn_info,
b9c74b4d 3142 int flags)
e011eba9 3143{
3072d30e 3144 rtx note;
3072d30e 3145 bool is_sibling_call;
3146 unsigned int i;
f7583c76 3147 HARD_REG_SET defs_generated;
754158d9 3148 HARD_REG_SET fn_reg_set_usage;
4b5a4301 3149
f7583c76 3150 CLEAR_HARD_REG_SET (defs_generated);
3151 df_find_hard_reg_defs (PATTERN (insn_info->insn), &defs_generated);
3152 is_sibling_call = SIBLING_CALL_P (insn_info->insn);
754158d9 3153 get_call_reg_set_usage (insn_info->insn, &fn_reg_set_usage,
3154 regs_invalidated_by_call);
e011eba9 3155
f7583c76 3156 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3157 {
3158 if (i == STACK_POINTER_REGNUM)
3159 /* The stack ptr is used (honorarily) by a CALL insn. */
3160 df_ref_record (DF_REF_BASE, collection_rec, regno_reg_rtx[i],
3161 NULL, bb, insn_info, DF_REF_REG_USE,
3162 DF_REF_CALL_STACK_USAGE | flags);
3163 else if (global_regs[i])
3164 {
3165 /* Calls to const functions cannot access any global registers and
3166 calls to pure functions cannot set them. All other calls may
3167 reference any of the global registers, so they are recorded as
3168 used. */
3169 if (!RTL_CONST_CALL_P (insn_info->insn))
3170 {
3171 df_ref_record (DF_REF_BASE, collection_rec, regno_reg_rtx[i],
3172 NULL, bb, insn_info, DF_REF_REG_USE, flags);
3173 if (!RTL_PURE_CALL_P (insn_info->insn))
3174 df_ref_record (DF_REF_BASE, collection_rec, regno_reg_rtx[i],
3175 NULL, bb, insn_info, DF_REF_REG_DEF, flags);
3176 }
3177 }
754158d9 3178 else if (TEST_HARD_REG_BIT (fn_reg_set_usage, i)
f7583c76 3179 /* no clobbers for regs that are the result of the call */
3180 && !TEST_HARD_REG_BIT (defs_generated, i)
3181 && (!is_sibling_call
3182 || !bitmap_bit_p (df->exit_block_uses, i)
2ec77a7c 3183 || refers_to_regno_p (i, crtl->return_rtx)))
f7583c76 3184 df_ref_record (DF_REF_BASE, collection_rec, regno_reg_rtx[i],
3185 NULL, bb, insn_info, DF_REF_REG_DEF,
3186 DF_REF_MAY_CLOBBER | flags);
3187 }
e011eba9 3188
3072d30e 3189  /* Record the registers used to pass arguments, and those explicitly
 3190     noted as clobbered.  */
158b6cc9 3191 for (note = CALL_INSN_FUNCTION_USAGE (insn_info->insn); note;
3072d30e 3192 note = XEXP (note, 1))
3193 {
3194 if (GET_CODE (XEXP (note, 0)) == USE)
c989ecc1 3195 df_uses_record (collection_rec, &XEXP (XEXP (note, 0), 0),
3196 DF_REF_REG_USE, bb, insn_info, flags);
3072d30e 3197 else if (GET_CODE (XEXP (note, 0)) == CLOBBER)
3198 {
5736c998 3199 if (REG_P (XEXP (XEXP (note, 0), 0)))
3200 {
3201 unsigned int regno = REGNO (XEXP (XEXP (note, 0), 0));
f7583c76 3202 if (!TEST_HARD_REG_BIT (defs_generated, regno))
5736c998 3203 df_defs_record (collection_rec, XEXP (note, 0), bb,
158b6cc9 3204 insn_info, flags);
5736c998 3205 }
3206 else
c989ecc1 3207 df_uses_record (collection_rec, &XEXP (note, 0),
3208 DF_REF_REG_USE, bb, insn_info, flags);
3072d30e 3209 }
3210 }
e011eba9 3211
3072d30e 3212 return;
3213}
e011eba9 3214
3072d30e 3215/* Collect all refs in the INSN. This function is free of any
 3216   side-effect - it will create and return lists of df_ref's in the
3217 COLLECTION_REC without putting those refs into existing ref chains
3218 and reg chains. */
e011eba9 3219
3072d30e 3220static void
f7583c76 3221df_insn_refs_collect (struct df_collection_rec *collection_rec,
48e1416a 3222 basic_block bb, struct df_insn_info *insn_info)
3072d30e 3223{
3224 rtx note;
158b6cc9 3225 bool is_cond_exec = (GET_CODE (PATTERN (insn_info->insn)) == COND_EXEC);
e011eba9 3226
3072d30e 3227 /* Clear out the collection record. */
f1f41a6c 3228 collection_rec->def_vec.truncate (0);
3229 collection_rec->use_vec.truncate (0);
3230 collection_rec->eq_use_vec.truncate (0);
3231 collection_rec->mw_vec.truncate (0);
e011eba9 3232
158b6cc9 3233 /* Process REG_EQUIV/REG_EQUAL notes. */
3234 for (note = REG_NOTES (insn_info->insn); note;
3072d30e 3235 note = XEXP (note, 1))
3236 {
3237 switch (REG_NOTE_KIND (note))
3238 {
3239 case REG_EQUIV:
3240 case REG_EQUAL:
c989ecc1 3241 df_uses_record (collection_rec,
3072d30e 3242 &XEXP (note, 0), DF_REF_REG_USE,
c989ecc1 3243 bb, insn_info, DF_REF_IN_NOTE);
3072d30e 3244 break;
3245 case REG_NON_LOCAL_GOTO:
3246 /* The frame ptr is used by a non-local goto. */
ed6e85ae 3247 df_ref_record (DF_REF_BASE, collection_rec,
3072d30e 3248 regno_reg_rtx[FRAME_POINTER_REGNUM],
158b6cc9 3249 NULL, bb, insn_info,
c989ecc1 3250 DF_REF_REG_USE, 0);
f703b3d6 3251 if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
3252 df_ref_record (DF_REF_BASE, collection_rec,
3253 regno_reg_rtx[HARD_FRAME_POINTER_REGNUM],
3254 NULL, bb, insn_info,
3255 DF_REF_REG_USE, 0);
3072d30e 3256 break;
3257 default:
3258 break;
3259 }
e011eba9 3260 }
3072d30e 3261
f7583c76 3262 /* For CALL_INSNs, first record DF_REF_BASE register defs, as well as
3263 uses from CALL_INSN_FUNCTION_USAGE. */
158b6cc9 3264 if (CALL_P (insn_info->insn))
48e1416a 3265 df_get_call_refs (collection_rec, bb, insn_info,
3072d30e 3266 (is_cond_exec) ? DF_REF_CONDITIONAL : 0);
3267
f7583c76 3268 /* Record other defs. These should be mostly for DF_REF_REGULAR, so
3269 that a qsort on the defs is unnecessary in most cases. */
3270 df_defs_record (collection_rec,
3271 PATTERN (insn_info->insn), bb, insn_info, 0);
3272
3072d30e 3273 /* Record the register uses. */
c989ecc1 3274 df_uses_record (collection_rec,
3275 &PATTERN (insn_info->insn), DF_REF_REG_USE, bb, insn_info, 0);
3072d30e 3276
3277 /* DF_REF_CONDITIONAL needs corresponding USES. */
3278 if (is_cond_exec)
3279 df_get_conditional_uses (collection_rec);
3280
3281 df_canonize_collection_rec (collection_rec);
e011eba9 3282}
3283
3072d30e 3284/* Recompute the luids for the insns in BB. */
3285
3286void
3287df_recompute_luids (basic_block bb)
e011eba9 3288{
7a6a083c 3289 rtx_insn *insn;
e011eba9 3290 int luid = 0;
3e6933a8 3291
3072d30e 3292 df_grow_insn_info ();
e011eba9 3293
3294 /* Scan the block an insn at a time from beginning to end. */
3295 FOR_BB_INSNS (bb, insn)
3296 {
158b6cc9 3297 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
3072d30e 3298 /* Inserting labels does not always trigger the incremental
3299 rescanning. */
3300 if (!insn_info)
e011eba9 3301 {
3072d30e 3302 gcc_assert (!INSN_P (insn));
158b6cc9 3303 insn_info = df_insn_create_insn_record (insn);
e011eba9 3304 }
3072d30e 3305
158b6cc9 3306 DF_INSN_INFO_LUID (insn_info) = luid;
3072d30e 3307 if (INSN_P (insn))
3308 luid++;
3309 }
3310}
3311
3312
3072d30e 3313/* Collect all artificial refs at the block level for BB and add them
3314 to COLLECTION_REC. */
3315
3316static void
3317df_bb_refs_collect (struct df_collection_rec *collection_rec, basic_block bb)
3318{
f1f41a6c 3319 collection_rec->def_vec.truncate (0);
3320 collection_rec->use_vec.truncate (0);
3321 collection_rec->eq_use_vec.truncate (0);
3322 collection_rec->mw_vec.truncate (0);
3072d30e 3323
3324 if (bb->index == ENTRY_BLOCK)
3325 {
3326 df_entry_block_defs_collect (collection_rec, df->entry_block_defs);
3327 return;
3328 }
3329 else if (bb->index == EXIT_BLOCK)
3330 {
3331 df_exit_block_uses_collect (collection_rec, df->exit_block_uses);
3332 return;
e011eba9 3333 }
3334
dea7b504 3335 if (bb_has_eh_pred (bb))
e011eba9 3336 {
3337 unsigned int i;
3338 /* Mark the registers that will contain data for the handler. */
fcf2ad9f 3339 for (i = 0; ; ++i)
3340 {
3341 unsigned regno = EH_RETURN_DATA_REGNO (i);
3342 if (regno == INVALID_REGNUM)
3343 break;
ed6e85ae 3344 df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[regno], NULL,
c989ecc1 3345 bb, NULL, DF_REF_REG_DEF, DF_REF_AT_TOP);
fcf2ad9f 3346 }
e011eba9 3347 }
e011eba9 3348
3072d30e 3349 /* Add the hard_frame_pointer if this block is the target of a
3350 non-local goto. */
3351 if (bb->flags & BB_NON_LOCAL_GOTO_TARGET)
ed6e85ae 3352 df_ref_record (DF_REF_ARTIFICIAL, collection_rec, hard_frame_pointer_rtx, NULL,
c989ecc1 3353 bb, NULL, DF_REF_REG_DEF, DF_REF_AT_TOP);
48e1416a 3354
3072d30e 3355 /* Add the artificial uses. */
3356 if (bb->index >= NUM_FIXED_BLOCKS)
3e6933a8 3357 {
3358 bitmap_iterator bi;
3359 unsigned int regno;
48e1416a 3360 bitmap au = bb_has_eh_pred (bb)
4b5a4301 3361 ? &df->eh_block_artificial_uses
3362 : &df->regular_block_artificial_uses;
3e6933a8 3363
3072d30e 3364 EXECUTE_IF_SET_IN_BITMAP (au, 0, regno, bi)
3e6933a8 3365 {
ed6e85ae 3366 df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[regno], NULL,
c989ecc1 3367 bb, NULL, DF_REF_REG_USE, 0);
3e6933a8 3368 }
e011eba9 3369 }
3072d30e 3370
3371 df_canonize_collection_rec (collection_rec);
e011eba9 3372}
3373
05268a5f 3374
3072d30e 3375/* Record all the refs within the basic block BB_INDEX and scan the instructions if SCAN_INSNS. */
3376
3377void
3378df_bb_refs_record (int bb_index, bool scan_insns)
05268a5f 3379{
f5a6b05f 3380 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
7a6a083c 3381 rtx_insn *insn;
3072d30e 3382 int luid = 0;
05268a5f 3383
3072d30e 3384 if (!df)
05268a5f 3385 return;
3386
d70aebca 3387 df_collection_rec collection_rec;
369ea98d 3388 df_grow_bb_info (df_scan);
3072d30e 3389 if (scan_insns)
3390 /* Scan the block an insn at a time from beginning to end. */
3391 FOR_BB_INSNS (bb, insn)
3392 {
158b6cc9 3393 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
3072d30e 3394 gcc_assert (!insn_info);
3395
158b6cc9 3396 insn_info = df_insn_create_insn_record (insn);
3072d30e 3397 if (INSN_P (insn))
3398 {
3399 /* Record refs within INSN. */
158b6cc9 3400 DF_INSN_INFO_LUID (insn_info) = luid++;
3401 df_insn_refs_collect (&collection_rec, bb, DF_INSN_INFO_GET (insn));
d70aebca 3402 df_refs_add_to_chains (&collection_rec, bb, insn, copy_all);
3072d30e 3403 }
158b6cc9 3404 DF_INSN_INFO_LUID (insn_info) = luid;
3072d30e 3405 }
3406
3407 /* Other block level artificial refs */
3408 df_bb_refs_collect (&collection_rec, bb);
d70aebca 3409 df_refs_add_to_chains (&collection_rec, bb, NULL, copy_all);
f7f05a07 3410
3072d30e 3411 /* Now that the block has been processed, set the block as dirty so
84da8954 3412     LR and LIVE will process it.  */
3072d30e 3413 df_set_bb_dirty (bb);
05268a5f 3414}
e011eba9 3415
3072d30e 3416
3417/* Get the artificial use set for a regular (i.e. non-exit/non-entry)
3418 block. */
e011eba9 3419
3420static void
3072d30e 3421df_get_regular_block_artificial_uses (bitmap regular_block_artificial_uses)
e011eba9 3422{
f7039b33 3423#ifdef EH_USES
3424 unsigned int i;
3425#endif
3426
3072d30e 3427 bitmap_clear (regular_block_artificial_uses);
e011eba9 3428
3072d30e 3429 if (reload_completed)
e011eba9 3430 {
3072d30e 3431 if (frame_pointer_needed)
3432 bitmap_set_bit (regular_block_artificial_uses, HARD_FRAME_POINTER_REGNUM);
3433 }
3434 else
3435 /* Before reload, there are a few registers that must be forced
3436 live everywhere -- which might not already be the case for
3437 blocks within infinite loops. */
3438 {
89bc53e2 3439 unsigned int picreg = PIC_OFFSET_TABLE_REGNUM;
3440
3072d30e 3441 /* Any reference to any pseudo before reload is a potential
3442 reference of the frame pointer. */
3443 bitmap_set_bit (regular_block_artificial_uses, FRAME_POINTER_REGNUM);
48e1416a 3444
f703b3d6 3445 if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
3446 bitmap_set_bit (regular_block_artificial_uses,
3447 HARD_FRAME_POINTER_REGNUM);
3072d30e 3448
3449#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3450 /* Pseudos with argument area equivalences may require
3451 reloading via the argument pointer. */
3452 if (fixed_regs[ARG_POINTER_REGNUM])
3453 bitmap_set_bit (regular_block_artificial_uses, ARG_POINTER_REGNUM);
3454#endif
48e1416a 3455
3072d30e 3456 /* Any constant, or pseudo with constant equivalences, may
3457 require reloading from memory using the pic register. */
89bc53e2 3458 if (picreg != INVALID_REGNUM
3459 && fixed_regs[picreg])
3460 bitmap_set_bit (regular_block_artificial_uses, picreg);
e011eba9 3461 }
3072d30e 3462 /* The all-important stack pointer must always be live. */
3463 bitmap_set_bit (regular_block_artificial_uses, STACK_POINTER_REGNUM);
f7039b33 3464
3465#ifdef EH_USES
3466 /* EH_USES registers are used:
3467 1) at all insns that might throw (calls or with -fnon-call-exceptions
3468 trapping insns)
3469 2) in all EH edges
3470 3) to support backtraces and/or debugging, anywhere between their
3471 initialization and where the saved registers are restored
3472 from them, including the cases where we don't reach the epilogue
3473 (noreturn call or infinite loop). */
3474 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3475 if (EH_USES (i))
3476 bitmap_set_bit (regular_block_artificial_uses, i);
3477#endif
3072d30e 3478}
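
/* For reference, EH_USES is a target macro; a back end that must keep,
   say, an exception-data register live everywhere would provide
   something roughly like (the register name is illustrative only):

     #define EH_USES(REGNO) ((REGNO) == MY_EH_DATA_REGNUM)

   which the loop above turns into an artificial use in every block.  */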
3479
e011eba9 3480
3072d30e 3481/* Get the artificial use set for an eh block. */
fcf2ad9f 3482
3072d30e 3483static void
3484df_get_eh_block_artificial_uses (bitmap eh_block_artificial_uses)
3485{
3486 bitmap_clear (eh_block_artificial_uses);
05268a5f 3487
9d75589a 3488 /* The following code (down through the arg_pointer setting) APPEARS
3072d30e 3489 to be necessary because there is nothing that actually
3490 describes what the exception handling code may need
3491 to keep alive. */
3492 if (reload_completed)
3493 {
3494 if (frame_pointer_needed)
3495 {
3496 bitmap_set_bit (eh_block_artificial_uses, FRAME_POINTER_REGNUM);
f703b3d6 3497 if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
3498 bitmap_set_bit (eh_block_artificial_uses,
3499 HARD_FRAME_POINTER_REGNUM);
3072d30e 3500 }
3501#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3502 if (fixed_regs[ARG_POINTER_REGNUM])
3503 bitmap_set_bit (eh_block_artificial_uses, ARG_POINTER_REGNUM);
3504#endif
3505 }
e011eba9 3506}
3507
3508
3072d30e 3509\f
e011eba9 3510/*----------------------------------------------------------------------------
3511 Specialized hard register scanning functions.
3512----------------------------------------------------------------------------*/
3513
3072d30e 3514
e011eba9 3515/* Mark a register in SET. Hard registers in large modes get all
3516 of their component registers set as well. */
3517
3518static void
3519df_mark_reg (rtx reg, void *vset)
3520{
3521 bitmap set = (bitmap) vset;
3522 int regno = REGNO (reg);
3523
3524 gcc_assert (GET_MODE (reg) != BLKmode);
3525
e011eba9 3526 if (regno < FIRST_PSEUDO_REGISTER)
0933f1d9 3527 bitmap_set_range (set, regno, REG_NREGS (reg));
76d2e170 3528 else
3529 bitmap_set_bit (set, regno);
e011eba9 3530}
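
/* A tiny usage sketch, assuming df_bitmap_obstack has been set up (it
   is whenever the df machinery itself is live): marking a hard
   register sets every component hard reg, so all words of a multi-word
   register end up in the set.

     bitmap_head set;
     bitmap_initialize (&set, &df_bitmap_obstack);
     df_mark_reg (regno_reg_rtx[STACK_POINTER_REGNUM], &set);
     gcc_assert (bitmap_bit_p (&set, STACK_POINTER_REGNUM));
     bitmap_clear (&set);  */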
3531
fcf2ad9f 3532
3072d30e 3533/* Set the bits for regs that are considered to be defined at the entry. */
fcf2ad9f 3534
3535static void
3072d30e 3536df_get_entry_block_def_set (bitmap entry_block_defs)
fcf2ad9f 3537{
fcf2ad9f 3538 rtx r;
3072d30e 3539 int i;
fcf2ad9f 3540
3072d30e 3541 bitmap_clear (entry_block_defs);
fcf2ad9f 3542
3543 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
925e8609 3544 {
3545 if (global_regs[i])
3546 bitmap_set_bit (entry_block_defs, i);
3547 if (FUNCTION_ARG_REGNO_P (i))
3548 bitmap_set_bit (entry_block_defs, INCOMING_REGNO (i));
3549 }
48e1416a 3550
df4fadd5 3551 /* The all-important stack pointer. */
3552 bitmap_set_bit (entry_block_defs, STACK_POINTER_REGNUM);
3553
fcf2ad9f 3554 /* Once the prologue has been generated, all of these registers
3555 should just show up in the first regular block. */
3556 if (HAVE_prologue && epilogue_completed)
3557 {
3558 /* Defs for the callee saved registers are inserted so that the
3559 pushes have some defining location. */
3560 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3072d30e 3561 if ((call_used_regs[i] == 0) && (df_regs_ever_live_p (i)))
3562 bitmap_set_bit (entry_block_defs, i);
fcf2ad9f 3563 }
fcf2ad9f 3564
652ff291 3565 r = targetm.calls.struct_value_rtx (current_function_decl, true);
3566 if (r && REG_P (r))
3567 bitmap_set_bit (entry_block_defs, REGNO (r));
3568
82c7907c 3569 /* If the function has an incoming STATIC_CHAIN, it has to show up
3570 in the entry def set. */
3571 r = targetm.calls.static_chain (current_function_decl, true);
3572 if (r && REG_P (r))
3573 bitmap_set_bit (entry_block_defs, REGNO (r));
3574
3e6933a8 3575 if ((!reload_completed) || frame_pointer_needed)
fcf2ad9f 3576 {
3577 /* Any reference to any pseudo before reload is a potential
3578 reference of the frame pointer. */
3072d30e 3579 bitmap_set_bit (entry_block_defs, FRAME_POINTER_REGNUM);
f703b3d6 3580
3e6933a8 3581 /* If they are different, also mark the hard frame pointer as live. */
f703b3d6 3582 if (!HARD_FRAME_POINTER_IS_FRAME_POINTER
3583 && !LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM))
3072d30e 3584 bitmap_set_bit (entry_block_defs, HARD_FRAME_POINTER_REGNUM);
3e6933a8 3585 }
fcf2ad9f 3586
3e6933a8 3587 /* These registers are live everywhere. */
3588 if (!reload_completed)
3589 {
fcf2ad9f 3590#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3591 /* Pseudos with argument area equivalences may require
3592 reloading via the argument pointer. */
3593 if (fixed_regs[ARG_POINTER_REGNUM])
3072d30e 3594 bitmap_set_bit (entry_block_defs, ARG_POINTER_REGNUM);
fcf2ad9f 3595#endif
48e1416a 3596
fcf2ad9f 3597 /* Any constant, or pseudo with constant equivalences, may
3598 require reloading from memory using the pic register. */
099a99b5 3599 unsigned int picreg = PIC_OFFSET_TABLE_REGNUM;
89bc53e2 3600 if (picreg != INVALID_REGNUM
3601 && fixed_regs[picreg])
3602 bitmap_set_bit (entry_block_defs, picreg);
3072d30e 3603 }
3604
3605#ifdef INCOMING_RETURN_ADDR_RTX
3606 if (REG_P (INCOMING_RETURN_ADDR_RTX))
3607 bitmap_set_bit (entry_block_defs, REGNO (INCOMING_RETURN_ADDR_RTX));
3608#endif
48e1416a 3609
202d6e5f 3610 targetm.extra_live_on_entry (entry_block_defs);
3072d30e 3611}
3612
3613
3614/* Collect the refs for the (conservative) set of hard registers that
48e1416a 3615 are defined on entry to the function.
3616 It uses df->entry_block_defs to determine which register
3072d30e 3617 references to include. */
3618
3619static void
48e1416a 3620df_entry_block_defs_collect (struct df_collection_rec *collection_rec,
3072d30e 3621 bitmap entry_block_defs)
3622{
48e1416a 3623 unsigned int i;
3072d30e 3624 bitmap_iterator bi;
3625
3626 EXECUTE_IF_SET_IN_BITMAP (entry_block_defs, 0, i, bi)
3627 {
48e1416a 3628 df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[i], NULL,
34154e27 3629 ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, DF_REF_REG_DEF, 0);
3072d30e 3630 }
3631
3632 df_canonize_collection_rec (collection_rec);
3633}
3634
3635
3636/* Record the (conservative) set of hard registers that are defined on
3637 entry to the function. */
3638
3639static void
3640df_record_entry_block_defs (bitmap entry_block_defs)
3641{
3642 struct df_collection_rec collection_rec;
3072d30e 3643 df_entry_block_defs_collect (&collection_rec, entry_block_defs);
3644
3645 /* Process bb_refs chain */
f5a6b05f 3646 df_refs_add_to_chains (&collection_rec,
3647 BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK),
3648 NULL,
d70aebca 3649 copy_defs);
3072d30e 3650}
3651
3652
becfaa62 3653/* Update the defs in the entry block. */
3072d30e 3654
3655void
3656df_update_entry_block_defs (void)
3657{
4b5a4301 3658 bitmap_head refs;
3072d30e 3659 bool changed = false;
fcf2ad9f 3660
4b5a4301 3661 bitmap_initialize (&refs, &df_bitmap_obstack);
3662 df_get_entry_block_def_set (&refs);
3072d30e 3663 if (df->entry_block_defs)
3664 {
4b5a4301 3665 if (!bitmap_equal_p (df->entry_block_defs, &refs))
3072d30e 3666 {
3667 struct df_scan_bb_info *bb_info = df_scan_get_bb_info (ENTRY_BLOCK);
3668 df_ref_chain_delete_du_chain (bb_info->artificial_defs);
3669 df_ref_chain_delete (bb_info->artificial_defs);
3670 bb_info->artificial_defs = NULL;
3671 changed = true;
3672 }
3673 }
3674 else
3675 {
3676 struct df_scan_problem_data *problem_data
3677 = (struct df_scan_problem_data *) df_scan->problem_data;
4b5a4301 3678 gcc_unreachable ();
3072d30e 3679 df->entry_block_defs = BITMAP_ALLOC (&problem_data->reg_bitmaps);
3680 changed = true;
3681 }
fcf2ad9f 3682
3072d30e 3683 if (changed)
fcf2ad9f 3684 {
4b5a4301 3685 df_record_entry_block_defs (&refs);
3686 bitmap_copy (df->entry_block_defs, &refs);
f5a6b05f 3687 df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK));
fcf2ad9f 3688 }
4b5a4301 3689 bitmap_clear (&refs);
fcf2ad9f 3690}
3691
3692
3072d30e 3693/* Set the bits for regs that are considered to be used at the exit. */
e011eba9 3694
3695static void
3072d30e 3696df_get_exit_block_use_set (bitmap exit_block_uses)
e011eba9 3697{
48e1416a 3698 unsigned int i;
89bc53e2 3699 unsigned int picreg = PIC_OFFSET_TABLE_REGNUM;
e011eba9 3700
3072d30e 3701 bitmap_clear (exit_block_uses);
bff9eb26 3702
3703 /* Stack pointer is always live at the exit. */
3704 bitmap_set_bit (exit_block_uses, STACK_POINTER_REGNUM);
48e1416a 3705
e011eba9 3706 /* Mark the frame pointer if needed at the end of the function.
3707 If we end up eliminating it, it will be removed from the live
3708 list of each basic block by reload. */
48e1416a 3709
3e6933a8 3710 if ((!reload_completed) || frame_pointer_needed)
e011eba9 3711 {
3072d30e 3712 bitmap_set_bit (exit_block_uses, FRAME_POINTER_REGNUM);
f703b3d6 3713
e011eba9 3714 /* If they are different, also mark the hard frame pointer as live. */
f703b3d6 3715 if (!HARD_FRAME_POINTER_IS_FRAME_POINTER
3716 && !LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM))
3072d30e 3717 bitmap_set_bit (exit_block_uses, HARD_FRAME_POINTER_REGNUM);
e011eba9 3718 }
3719
e011eba9 3720 /* Many architectures have a GP register even without flag_pic.
3721 Assume the pic register is not in use, or will be handled by
3722 other means, if it is not fixed. */
260e669e 3723 if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
89bc53e2 3724 && picreg != INVALID_REGNUM
3725 && fixed_regs[picreg])
3726 bitmap_set_bit (exit_block_uses, picreg);
48e1416a 3727
e011eba9 3728 /* Mark all global registers, and all registers used by the
3729 epilogue as being live at the end of the function since they
3730 may be referenced by our caller. */
3731 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3732 if (global_regs[i] || EPILOGUE_USES (i))
3072d30e 3733 bitmap_set_bit (exit_block_uses, i);
48e1416a 3734
e011eba9 3735 if (HAVE_epilogue && epilogue_completed)
3736 {
3737 /* Mark all call-saved registers that we actually used. */
3738 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3072d30e 3739 if (df_regs_ever_live_p (i) && !LOCAL_REGNO (i)
3e6933a8 3740 && !TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3072d30e 3741 bitmap_set_bit (exit_block_uses, i);
e011eba9 3742 }
48e1416a 3743
e011eba9 3744 /* Mark the registers that will contain data for the handler. */
18d50ae6 3745 if (reload_completed && crtl->calls_eh_return)
e011eba9 3746 for (i = 0; ; ++i)
3747 {
3748 unsigned regno = EH_RETURN_DATA_REGNO (i);
3749 if (regno == INVALID_REGNUM)
3750 break;
3072d30e 3751 bitmap_set_bit (exit_block_uses, regno);
e011eba9 3752 }
e011eba9 3753
3754#ifdef EH_RETURN_STACKADJ_RTX
3e6933a8 3755 if ((!HAVE_epilogue || ! epilogue_completed)
18d50ae6 3756 && crtl->calls_eh_return)
e011eba9 3757 {
3758 rtx tmp = EH_RETURN_STACKADJ_RTX;
3759 if (tmp && REG_P (tmp))
3072d30e 3760 df_mark_reg (tmp, exit_block_uses);
e011eba9 3761 }
3762#endif
3763
3764#ifdef EH_RETURN_HANDLER_RTX
3e6933a8 3765 if ((!HAVE_epilogue || ! epilogue_completed)
18d50ae6 3766 && crtl->calls_eh_return)
e011eba9 3767 {
3768 rtx tmp = EH_RETURN_HANDLER_RTX;
3769 if (tmp && REG_P (tmp))
3072d30e 3770 df_mark_reg (tmp, exit_block_uses);
e011eba9 3771 }
48e1416a 3772#endif
3072d30e 3773
e011eba9 3774 /* Mark function return value. */
3072d30e 3775 diddle_return_value (df_mark_reg, (void*) exit_block_uses);
3776}
3777
3778
48e1416a 3779/* Collect the refs of hard registers that are used in the exit block.
3072d30e 3780 It uses df->exit_block_uses to determine which registers to include. */
3781
3782static void
3783df_exit_block_uses_collect (struct df_collection_rec *collection_rec, bitmap exit_block_uses)
3784{
48e1416a 3785 unsigned int i;
3072d30e 3786 bitmap_iterator bi;
3787
3788 EXECUTE_IF_SET_IN_BITMAP (exit_block_uses, 0, i, bi)
ed6e85ae 3789 df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[i], NULL,
34154e27 3790 EXIT_BLOCK_PTR_FOR_FN (cfun), NULL, DF_REF_REG_USE, 0);
e011eba9 3791
3072d30e 3792#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3793 /* It is deliberate that this is not put in the exit block uses but
3794 I do not know why. */
48e1416a 3795 if (reload_completed
3072d30e 3796 && !bitmap_bit_p (exit_block_uses, ARG_POINTER_REGNUM)
34154e27 3797 && bb_has_eh_pred (EXIT_BLOCK_PTR_FOR_FN (cfun))
3072d30e 3798 && fixed_regs[ARG_POINTER_REGNUM])
ed6e85ae 3799 df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[ARG_POINTER_REGNUM], NULL,
34154e27 3800 EXIT_BLOCK_PTR_FOR_FN (cfun), NULL, DF_REF_REG_USE, 0);
3072d30e 3801#endif
3802
3803 df_canonize_collection_rec (collection_rec);
3804}
3805
3806
48e1416a 3807/* Record the set of hard registers that are used in the exit block.
3072d30e 3808 It uses df->exit_block_uses to determine which bit to include. */
3809
3810static void
3811df_record_exit_block_uses (bitmap exit_block_uses)
3812{
3813 struct df_collection_rec collection_rec;
3072d30e 3814 df_exit_block_uses_collect (&collection_rec, exit_block_uses);
3815
3816 /* Process bb_refs chain */
f5a6b05f 3817 df_refs_add_to_chains (&collection_rec,
3818 BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK),
3819 NULL,
d70aebca 3820 copy_uses);
3072d30e 3821}
3822
3823
3824/* Update the uses in the exit block. */
3825
3826void
3827df_update_exit_block_uses (void)
3828{
4b5a4301 3829 bitmap_head refs;
3072d30e 3830 bool changed = false;
3831
4b5a4301 3832 bitmap_initialize (&refs, &df_bitmap_obstack);
3833 df_get_exit_block_use_set (&refs);
3072d30e 3834 if (df->exit_block_uses)
3835 {
4b5a4301 3836 if (!bitmap_equal_p (df->exit_block_uses, &refs))
3072d30e 3837 {
3838 struct df_scan_bb_info *bb_info = df_scan_get_bb_info (EXIT_BLOCK);
3839 df_ref_chain_delete_du_chain (bb_info->artificial_uses);
3840 df_ref_chain_delete (bb_info->artificial_uses);
3841 bb_info->artificial_uses = NULL;
3842 changed = true;
3843 }
3844 }
3845 else
3846 {
3847 struct df_scan_problem_data *problem_data
3848 = (struct df_scan_problem_data *) df_scan->problem_data;
4b5a4301 3849 gcc_unreachable ();
3072d30e 3850 df->exit_block_uses = BITMAP_ALLOC (&problem_data->reg_bitmaps);
3851 changed = true;
3852 }
3853
3854 if (changed)
3855 {
4b5a4301 3856 df_record_exit_block_uses (&refs);
3857 bitmap_copy (df->exit_block_uses, &refs);
f5a6b05f 3858 df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK));
3072d30e 3859 }
4b5a4301 3860 bitmap_clear (&refs);
e011eba9 3861}
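
/* Both update routines above follow the same compare-and-rebuild
   pattern: recompute the bitmap, and only if it differs from the
   cached one throw away the old artificial refs and record fresh ones.
   A caller that has changed something global would normally not call
   them directly but go through the drivers below, roughly:

     df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM, true);
     df_compute_regs_ever_live (false);

   which ends up in df_update_entry_exit_and_calls when anything
   actually changed.  */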
3862
3863static bool initialized = false;
3864
3072d30e 3865
e011eba9 3866/* Initialize some platform specific structures. */
3867
48e1416a 3868void
e011eba9 3869df_hard_reg_init (void)
3870{
bebf8106 3871#ifdef ELIMINABLE_REGS
f9a42947 3872 int i;
e011eba9 3873 static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
3874#endif
e011eba9 3875 if (initialized)
3876 return;
3877
e011eba9 3878 /* Record which registers will be eliminated. We use this in
3879 mark_used_regs. */
3880 CLEAR_HARD_REG_SET (elim_reg_set);
48e1416a 3881
e011eba9 3882#ifdef ELIMINABLE_REGS
3883 for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++)
3884 SET_HARD_REG_BIT (elim_reg_set, eliminables[i].from);
3885#else
3886 SET_HARD_REG_BIT (elim_reg_set, FRAME_POINTER_REGNUM);
3887#endif
48e1416a 3888
e011eba9 3889 initialized = true;
3890}
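
/* For reference, the ELIMINABLE_REGS initializer consumed above is a
   target macro; a typical definition looks roughly like this (purely
   illustrative, not taken from any particular back end):

     #define ELIMINABLE_REGS                                  \
       {{ ARG_POINTER_REGNUM,   STACK_POINTER_REGNUM },       \
        { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }}

   so elim_reg_set ends up containing every register that may be
   eliminated, i.e. every .from field.  */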
3072d30e 3891
3892
3893/* Recompute the parts of scanning that are based on regs_ever_live
48e1416a 3894 because something changed in that array. */
3072d30e 3895
48e1416a 3896void
3072d30e 3897df_update_entry_exit_and_calls (void)
3898{
3899 basic_block bb;
3900
3901 df_update_entry_block_defs ();
3902 df_update_exit_block_uses ();
3903
3904 /* The call insns need to be rescanned because there may be changes
3905 in the set of registers clobbered across the call. */
fc00614f 3906 FOR_EACH_BB_FN (bb, cfun)
3072d30e 3907 {
7a6a083c 3908 rtx_insn *insn;
3072d30e 3909 FOR_BB_INSNS (bb, insn)
3910 {
3911 if (INSN_P (insn) && CALL_P (insn))
3912 df_insn_rescan (insn);
3913 }
3914 }
3915}
3916
3917
3918/* Return true if hard REG is actually used in some instruction.
3919 There are a fair number of conditions that affect the setting of
3920 this array. See the comment in df.h for df->hard_regs_live_count
3921 for the conditions under which this array is set. */
3922
48e1416a 3923bool
3072d30e 3924df_hard_reg_used_p (unsigned int reg)
3925{
3072d30e 3926 return df->hard_regs_live_count[reg] != 0;
3927}
3928
3929
3930/* A count of the number of times REG is actually used in some
3931 instruction. There are a fair number of conditions that affect the
3932 setting of this array. See the comment in df.h for
3933 df->hard_regs_live_count for the conditions under which this array
3934 is set. */
3935
3936
3937unsigned int
3938df_hard_reg_used_count (unsigned int reg)
3939{
3072d30e 3940 return df->hard_regs_live_count[reg];
3941}
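
/* A minimal sketch of how a pass might consult these two accessors,
   assuming the df framework has been initialized for the current
   function:

     if (df_hard_reg_used_p (HARD_FRAME_POINTER_REGNUM))
       {
         unsigned int n
           = df_hard_reg_used_count (HARD_FRAME_POINTER_REGNUM);
         ...
       }

   Per the comments above, only references that occur in actual
   instructions are counted.  */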
3942
3943
3944/* Get the value of regs_ever_live[REGNO]. */
3945
48e1416a 3946bool
3072d30e 3947df_regs_ever_live_p (unsigned int regno)
3948{
3949 return regs_ever_live[regno];
3950}
3951
3952
3953/* Set regs_ever_live[REGNO] to VALUE. If this causes regs_ever_live
3954 to change, schedule that change for the next update. */
3955
48e1416a 3956void
3072d30e 3957df_set_regs_ever_live (unsigned int regno, bool value)
3958{
3959 if (regs_ever_live[regno] == value)
3960 return;
3961
3962 regs_ever_live[regno] = value;
3963 if (df)
3964 df->redo_entry_and_exit = true;
3965}
3966
3967
3968/* Compute "regs_ever_live" information from the underlying df
3969 information. Set the vector to all false if RESET. */
3970
3971void
3972df_compute_regs_ever_live (bool reset)
3973{
3974 unsigned int i;
3975 bool changed = df->redo_entry_and_exit;
48e1416a 3976
3072d30e 3977 if (reset)
3978 memset (regs_ever_live, 0, sizeof (regs_ever_live));
3979
3980 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3981 if ((!regs_ever_live[i]) && df_hard_reg_used_p (i))
3982 {
3983 regs_ever_live[i] = true;
3984 changed = true;
3985 }
3986 if (changed)
3987 df_update_entry_exit_and_calls ();
3988 df->redo_entry_and_exit = false;
3989}
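
/* A sketched call sequence, e.g. from a pass that has just made some
   hard registers live or dead behind df's back:

     df_compute_regs_ever_live (true);

   resets regs_ever_live and rebuilds it from df's counts, while

     df_compute_regs_ever_live (false);

   only adds newly used registers; either form re-scans the entry
   block, the exit block and all call insns when something changed.  */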
3990
3991\f
3992/*----------------------------------------------------------------------------
3993 Dataflow ref information verification functions.
3994
3995 df_reg_chain_mark (refs, regno, is_def, is_eq_use)
3996 df_reg_chain_verify_unmarked (refs)
f1f41a6c 3997 df_refs_verify (vec<df_ref, va_heap> *, df_ref, bool)
3072d30e 3998 df_mws_verify (mw*, mw*, bool)
3999 df_insn_refs_verify (collection_rec, bb, insn, bool)
4000 df_bb_refs_verify (bb, refs, bool)
4001 df_bb_verify (bb)
4002 df_exit_block_bitmap_verify (bool)
4003 df_entry_block_bitmap_verify (bool)
4004 df_scan_verify ()
4005----------------------------------------------------------------------------*/
4006
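/* A minimal sketch of how a pass might request this checking after
   doing its own surgery on insns, assuming a build with
   ENABLE_CHECKING defined:

     #ifdef ENABLE_CHECKING
       df_scan_verify ();
     #endif

   df_verify in df-core.c is the usual entry point and ends up calling
   df_scan_verify.  */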
4007
4008/* Mark all refs in the reg chain. Verify that all of the registers
48e1416a 4009are in the correct chain. */
3072d30e 4010
4011static unsigned int
48e1416a 4012df_reg_chain_mark (df_ref refs, unsigned int regno,
3072d30e 4013 bool is_def, bool is_eq_use)
4014{
4015 unsigned int count = 0;
ed6e85ae 4016 df_ref ref;
3072d30e 4017 for (ref = refs; ref; ref = DF_REF_NEXT_REG (ref))
4018 {
4019 gcc_assert (!DF_REF_IS_REG_MARKED (ref));
4020
4021 /* If there are no def-use or use-def chains, make sure that all
4022 of the chains are clear. */
4023 if (!df_chain)
4024 gcc_assert (!DF_REF_CHAIN (ref));
4025
4026 /* Check to make sure the ref is in the correct chain. */
4027 gcc_assert (DF_REF_REGNO (ref) == regno);
4028 if (is_def)
ed6e85ae 4029 gcc_assert (DF_REF_REG_DEF_P (ref));
3072d30e 4030 else
ed6e85ae 4031 gcc_assert (!DF_REF_REG_DEF_P (ref));
3072d30e 4032
4033 if (is_eq_use)
4034 gcc_assert ((DF_REF_FLAGS (ref) & DF_REF_IN_NOTE));
4035 else
4036 gcc_assert ((DF_REF_FLAGS (ref) & DF_REF_IN_NOTE) == 0);
4037
ed6e85ae 4038 if (DF_REF_NEXT_REG (ref))
4039 gcc_assert (DF_REF_PREV_REG (DF_REF_NEXT_REG (ref)) == ref);
3072d30e 4040 count++;
4041 DF_REF_REG_MARK (ref);
4042 }
4043 return count;
4044}
4045
4046
48e1416a 4047/* Verify that all of the registers in the chain are unmarked. */
3072d30e 4048
4049static void
ed6e85ae 4050df_reg_chain_verify_unmarked (df_ref refs)
3072d30e 4051{
ed6e85ae 4052 df_ref ref;
3072d30e 4053 for (ref = refs; ref; ref = DF_REF_NEXT_REG (ref))
4054 gcc_assert (!DF_REF_IS_REG_MARKED (ref));
4055}
4056
4057
4058/* Verify that NEW_REC and OLD_REC have exactly the same members. */
4059
4060static bool
ddc2d0e3 4061df_refs_verify (const vec<df_ref, va_heap> *new_rec, df_ref old_rec,
3072d30e 4062 bool abort_if_fail)
4063{
f7f05a07 4064 unsigned int ix;
4065 df_ref new_ref;
4066
d70aebca 4067 FOR_EACH_VEC_ELT (*new_rec, ix, new_ref)
3072d30e 4068 {
ddc2d0e3 4069 if (old_rec == NULL || !df_ref_equal_p (new_ref, old_rec))
3072d30e 4070 {
4071 if (abort_if_fail)
4072 gcc_assert (0);
4073 else
4074 return false;
4075 }
4076
4077 /* ABORT_IF_FAIL is set when called from the function level
4078 verifier. If that is the context, mark this ref as being seen. */
4079 if (abort_if_fail)
4080 {
ddc2d0e3 4081 gcc_assert (DF_REF_IS_REG_MARKED (old_rec));
4082 DF_REF_REG_UNMARK (old_rec);
3072d30e 4083 }
4084
ddc2d0e3 4085 old_rec = DF_REF_NEXT_LOC (old_rec);
3072d30e 4086 }
4087
4088 if (abort_if_fail)
ddc2d0e3 4089 gcc_assert (old_rec == NULL);
3072d30e 4090 else
ddc2d0e3 4091 return old_rec == NULL;
3072d30e 4092 return false;
4093}
4094
4095
4096/* Verify that NEW_REC and OLD_REC have exactly the same members. */
4097
4098static bool
d70aebca 4099df_mws_verify (const vec<df_mw_hardreg_ptr, va_heap> *new_rec,
ddc2d0e3 4100 struct df_mw_hardreg *old_rec,
3072d30e 4101 bool abort_if_fail)
4102{
f7f05a07 4103 unsigned int ix;
4104 struct df_mw_hardreg *new_reg;
4105
d70aebca 4106 FOR_EACH_VEC_ELT (*new_rec, ix, new_reg)
3072d30e 4107 {
ddc2d0e3 4108 if (old_rec == NULL || !df_mw_equal_p (new_reg, old_rec))
3072d30e 4109 {
4110 if (abort_if_fail)
4111 gcc_assert (0);
4112 else
4113 return false;
4114 }
ddc2d0e3 4115 old_rec = DF_MWS_NEXT (old_rec);
3072d30e 4116 }
4117
4118 if (abort_if_fail)
ddc2d0e3 4119 gcc_assert (old_rec == NULL);
3072d30e 4120 else
ddc2d0e3 4121 return old_rec == NULL;
3072d30e 4122 return false;
4123}
4124
4125
4126/* Return true if the existing insn refs information is complete and
4127 correct. Otherwise (i.e. if there are any missing or extra refs),
48e1416a 4128 the correct refs will have been collected in COLLECTION_REC.
3072d30e 4129
4130 If ABORT_IF_FAIL, leave the refs that are verified (already in the
4131 ref chain) as DF_REF_MARKED(). If it's false, then it's a per-insn
4132 verification mode instead of the whole function, so unmark
4133 everything.
4134
4135 If ABORT_IF_FAIL is set, this function never returns false. */
4136
4137static bool
4138df_insn_refs_verify (struct df_collection_rec *collection_rec,
48e1416a 4139 basic_block bb,
e149ca56 4140 rtx_insn *insn,
3072d30e 4141 bool abort_if_fail)
4142{
4143 bool ret1, ret2, ret3, ret4;
4144 unsigned int uid = INSN_UID (insn);
158b6cc9 4145 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
3072d30e 4146
158b6cc9 4147 df_insn_refs_collect (collection_rec, bb, insn_info);
3072d30e 4148
3072d30e 4149 /* Unfortunately we cannot opt out early if one of these is not
4150 right because the marks will not get cleared. */
d70aebca 4151 ret1 = df_refs_verify (&collection_rec->def_vec, DF_INSN_UID_DEFS (uid),
3072d30e 4152 abort_if_fail);
d70aebca 4153 ret2 = df_refs_verify (&collection_rec->use_vec, DF_INSN_UID_USES (uid),
3072d30e 4154 abort_if_fail);
d70aebca 4155 ret3 = df_refs_verify (&collection_rec->eq_use_vec, DF_INSN_UID_EQ_USES (uid),
3072d30e 4156 abort_if_fail);
d70aebca 4157 ret4 = df_mws_verify (&collection_rec->mw_vec, DF_INSN_UID_MWS (uid),
3072d30e 4158 abort_if_fail);
4159 return (ret1 && ret2 && ret3 && ret4);
4160}
4161
4162
4163/* Return true if all refs in the basic block are correct and complete.
4164 Due to df_ref_chain_verify, it will cause all refs
4165 that are verified to have the DF_REF_MARK bit set. */
4166
4167static bool
4168df_bb_verify (basic_block bb)
4169{
7a6a083c 4170 rtx_insn *insn;
3072d30e 4171 struct df_scan_bb_info *bb_info = df_scan_get_bb_info (bb->index);
4172 struct df_collection_rec collection_rec;
48e1416a 4173
3072d30e 4174 gcc_assert (bb_info);
4175
ed6e85ae 4176 /* Scan the block, one insn at a time, from beginning to end. */
3072d30e 4177 FOR_BB_INSNS_REVERSE (bb, insn)
4178 {
4179 if (!INSN_P (insn))
4180 continue;
4181 df_insn_refs_verify (&collection_rec, bb, insn, true);
25d2985a 4182 df_free_collection_rec (&collection_rec);
3072d30e 4183 }
4184
4185 /* Do the artificial defs and uses. */
4186 df_bb_refs_collect (&collection_rec, bb);
d70aebca 4187 df_refs_verify (&collection_rec.def_vec, df_get_artificial_defs (bb->index), true);
4188 df_refs_verify (&collection_rec.use_vec, df_get_artificial_uses (bb->index), true);
3072d30e 4189 df_free_collection_rec (&collection_rec);
48e1416a 4190
3072d30e 4191 return true;
4192}
4193
4194
48e1416a 4195/* Returns true if the entry block has correct and complete df_ref set.
3072d30e 4196 If not it either aborts if ABORT_IF_FAIL is true or returns false. */
4197
4198static bool
4199df_entry_block_bitmap_verify (bool abort_if_fail)
4200{
4b5a4301 4201 bitmap_head entry_block_defs;
3072d30e 4202 bool is_eq;
4203
4b5a4301 4204 bitmap_initialize (&entry_block_defs, &df_bitmap_obstack);
4205 df_get_entry_block_def_set (&entry_block_defs);
3072d30e 4206
4b5a4301 4207 is_eq = bitmap_equal_p (&entry_block_defs, df->entry_block_defs);
3072d30e 4208
4209 if (!is_eq && abort_if_fail)
4210 {
3072d30e 4211 fprintf (stderr, "entry_block_defs = ");
4b5a4301 4212 df_print_regset (stderr, &entry_block_defs);
3072d30e 4213 fprintf (stderr, "df->entry_block_defs = ");
4214 df_print_regset (stderr, df->entry_block_defs);
4215 gcc_assert (0);
4216 }
4217
4b5a4301 4218 bitmap_clear (&entry_block_defs);
3072d30e 4219
4220 return is_eq;
4221}
4222
4223
48e1416a 4224/* Returns true if the exit block has correct and complete df_ref set.
3072d30e 4225 If not it either aborts if ABORT_IF_FAIL is true or returns false. */
4226
4227static bool
4228df_exit_block_bitmap_verify (bool abort_if_fail)
4229{
4b5a4301 4230 bitmap_head exit_block_uses;
3072d30e 4231 bool is_eq;
4232
4b5a4301 4233 bitmap_initialize (&exit_block_uses, &df_bitmap_obstack);
4234 df_get_exit_block_use_set (&exit_block_uses);
3072d30e 4235
4b5a4301 4236 is_eq = bitmap_equal_p (&exit_block_uses, df->exit_block_uses);
3072d30e 4237
4238 if (!is_eq && abort_if_fail)
4239 {
3072d30e 4240 fprintf (stderr, "exit_block_uses = ");
4b5a4301 4241 df_print_regset (stderr, &exit_block_uses);
3072d30e 4242 fprintf (stderr, "df->exit_block_uses = ");
4243 df_print_regset (stderr, df->exit_block_uses);
4244 gcc_assert (0);
4245 }
4246
4b5a4301 4247 bitmap_clear (&exit_block_uses);
3072d30e 4248
4249 return is_eq;
4250}
4251
4252
bf1f8fbc 4253/* Verify that the df_ref information for all insns in all blocks is
4254 correct and complete. */
3072d30e 4255
4256void
4257df_scan_verify (void)
4258{
4259 unsigned int i;
4260 basic_block bb;
4b5a4301 4261 bitmap_head regular_block_artificial_uses;
4262 bitmap_head eh_block_artificial_uses;
3072d30e 4263
4264 if (!df)
4265 return;
4266
3072d30e 4267 /* Verification is a 4 step process. */
4268
9d75589a 4269 /* (1) All of the refs are marked by going through the reg chains. */
3072d30e 4270 for (i = 0; i < DF_REG_SIZE (df); i++)
4271 {
48e1416a 4272 gcc_assert (df_reg_chain_mark (DF_REG_DEF_CHAIN (i), i, true, false)
9af5ce0c 4273 == DF_REG_DEF_COUNT (i));
48e1416a 4274 gcc_assert (df_reg_chain_mark (DF_REG_USE_CHAIN (i), i, false, false)
9af5ce0c 4275 == DF_REG_USE_COUNT (i));
48e1416a 4276 gcc_assert (df_reg_chain_mark (DF_REG_EQ_USE_CHAIN (i), i, false, true)
9af5ce0c 4277 == DF_REG_EQ_USE_COUNT (i));
3072d30e 4278 }
4279
4280 /* (2) There are various bitmaps whose value may change over the
4281 course of the compilation. This step recomputes them to make
4282 sure that they have not slipped out of date. */
4b5a4301 4283 bitmap_initialize (&regular_block_artificial_uses, &df_bitmap_obstack);
4284 bitmap_initialize (&eh_block_artificial_uses, &df_bitmap_obstack);
3072d30e 4285
4b5a4301 4286 df_get_regular_block_artificial_uses (&regular_block_artificial_uses);
4287 df_get_eh_block_artificial_uses (&eh_block_artificial_uses);
3072d30e 4288
4b5a4301 4289 bitmap_ior_into (&eh_block_artificial_uses,
4290 &regular_block_artificial_uses);
3072d30e 4291
4292 /* Check artificial_uses bitmaps didn't change. */
4b5a4301 4293 gcc_assert (bitmap_equal_p (&regular_block_artificial_uses,
4294 &df->regular_block_artificial_uses));
4295 gcc_assert (bitmap_equal_p (&eh_block_artificial_uses,
4296 &df->eh_block_artificial_uses));
3072d30e 4297
4b5a4301 4298 bitmap_clear (&regular_block_artificial_uses);
4299 bitmap_clear (&eh_block_artificial_uses);
3072d30e 4300
4301 /* Verify entry block and exit block. These only verify the bitmaps,
4302 the refs are verified in df_bb_verify. */
4303 df_entry_block_bitmap_verify (true);
4304 df_exit_block_bitmap_verify (true);
48e1416a 4305
3072d30e 4306 /* (3) All of the insns in all of the blocks are traversed and the
4307 marks are cleared both in the artificial refs attached to the
4308 blocks and the real refs inside the insns. It is a failure to
4309 clear a mark that has not been set as this means that the ref in
4310 the block or insn was not in the reg chain. */
4311
ed7d889a 4312 FOR_ALL_BB_FN (bb, cfun)
3072d30e 4313 df_bb_verify (bb);
4314
4315 /* (4) All reg chains are traversed a second time. This time a
4316 check is made that the marks are clear. A set mark would come
4317 from a ref that is not in any insn or basic block. */
4318
4319 for (i = 0; i < DF_REG_SIZE (df); i++)
4320 {
4321 df_reg_chain_verify_unmarked (DF_REG_DEF_CHAIN (i));
4322 df_reg_chain_verify_unmarked (DF_REG_USE_CHAIN (i));
4323 df_reg_chain_verify_unmarked (DF_REG_EQ_USE_CHAIN (i));
4324 }
4325}