]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/df-core.c
remove has_gate
[thirdparty/gcc.git] / gcc / df-core.c
1 /* Allocation for dataflow support routines.
2 Copyright (C) 1999-2014 Free Software Foundation, Inc.
3 Originally contributed by Michael P. Hayes
4 (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
5 Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
6 and Kenneth Zadeck (zadeck@naturalbridge.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /*
25 OVERVIEW:
26
27 The files in this collection (df*.c,df.h) provide a general framework
28 for solving dataflow problems. The global dataflow is performed using
29 a good implementation of iterative dataflow analysis.
30
31 The file df-problems.c provides problem instance for the most common
32 dataflow problems: reaching defs, upward exposed uses, live variables,
33 uninitialized variables, def-use chains, and use-def chains. However,
34 the interface allows other dataflow problems to be defined as well.
35
36 Dataflow analysis is available in most of the rtl backend (the parts
37 between pass_df_initialize and pass_df_finish). It is quite likely
38 that these boundaries will be expanded in the future. The only
39 requirement is that there be a correct control flow graph.
40
41 There are three variations of the live variable problem that are
42 available whenever dataflow is available. The LR problem finds the
43 areas that can reach a use of a variable, the UR problems finds the
44 areas that can be reached from a definition of a variable. The LIVE
45 problem finds the intersection of these two areas.
46
47 There are several optional problems. These can be enabled when they
48 are needed and disabled when they are not needed.
49
50 Dataflow problems are generally solved in three layers. The bottom
51 layer is called scanning where a data structure is built for each rtl
52 insn that describes the set of defs and uses of that insn. Scanning
53 is generally kept up to date, i.e. as the insns changes, the scanned
54 version of that insn changes also. There are various mechanisms for
55 making this happen and are described in the INCREMENTAL SCANNING
56 section.
57
58 In the middle layer, basic blocks are scanned to produce transfer
59 functions which describe the effects of that block on the global
60 dataflow solution. The transfer functions are only rebuilt if the
61 some instruction within the block has changed.
62
63 The top layer is the dataflow solution itself. The dataflow solution
64 is computed by using an efficient iterative solver and the transfer
65 functions. The dataflow solution must be recomputed whenever the
66 control changes or if one of the transfer function changes.
67
68
69 USAGE:
70
71 Here is an example of using the dataflow routines.
72
73 df_[chain,live,note,rd]_add_problem (flags);
74
75 df_set_blocks (blocks);
76
77 df_analyze ();
78
79 df_dump (stderr);
80
81 df_finish_pass (false);
82
83 DF_[chain,live,note,rd]_ADD_PROBLEM adds a problem, defined by an
84 instance to struct df_problem, to the set of problems solved in this
85 instance of df. All calls to add a problem for a given instance of df
86 must occur before the first call to DF_ANALYZE.
87
88 Problems can be dependent on other problems. For instance, solving
89 def-use or use-def chains is dependent on solving reaching
90 definitions. As long as these dependencies are listed in the problem
91 definition, the order of adding the problems is not material.
92 Otherwise, the problems will be solved in the order of calls to
93 df_add_problem. Note that it is not necessary to have a problem. In
94 that case, df will just be used to do the scanning.
95
96
97
98 DF_SET_BLOCKS is an optional call used to define a region of the
99 function on which the analysis will be performed. The normal case is
100 to analyze the entire function and no call to df_set_blocks is made.
101 DF_SET_BLOCKS only effects the blocks that are effected when computing
102 the transfer functions and final solution. The insn level information
103 is always kept up to date.
104
105 When a subset is given, the analysis behaves as if the function only
106 contains those blocks and any edges that occur directly between the
107 blocks in the set. Care should be taken to call df_set_blocks right
108 before the call to analyze in order to eliminate the possibility that
109 optimizations that reorder blocks invalidate the bitvector.
110
111 DF_ANALYZE causes all of the defined problems to be (re)solved. When
112 DF_ANALYZE is completes, the IN and OUT sets for each basic block
113 contain the computer information. The DF_*_BB_INFO macros can be used
114 to access these bitvectors. All deferred rescannings are down before
115 the transfer functions are recomputed.
116
117 DF_DUMP can then be called to dump the information produce to some
118 file. This calls DF_DUMP_START, to print the information that is not
119 basic block specific, and then calls DF_DUMP_TOP and DF_DUMP_BOTTOM
120 for each block to print the basic specific information. These parts
121 can all be called separately as part of a larger dump function.
122
123
124 DF_FINISH_PASS causes df_remove_problem to be called on all of the
125 optional problems. It also causes any insns whose scanning has been
126 deferred to be rescanned as well as clears all of the changeable flags.
127 Setting the pass manager TODO_df_finish flag causes this function to
128 be run. However, the pass manager will call df_finish_pass AFTER the
129 pass dumping has been done, so if you want to see the results of the
130 optional problems in the pass dumps, use the TODO flag rather than
131 calling the function yourself.
132
133 INCREMENTAL SCANNING
134
135 There are four ways of doing the incremental scanning:
136
137 1) Immediate rescanning - Calls to df_insn_rescan, df_notes_rescan,
138 df_bb_delete, df_insn_change_bb have been added to most of
139 the low level service functions that maintain the cfg and change
140 rtl. Calling and of these routines many cause some number of insns
141 to be rescanned.
142
143 For most modern rtl passes, this is certainly the easiest way to
144 manage rescanning the insns. This technique also has the advantage
145 that the scanning information is always correct and can be relied
146 upon even after changes have been made to the instructions. This
147 technique is contra indicated in several cases:
148
149 a) If def-use chains OR use-def chains (but not both) are built,
150 using this is SIMPLY WRONG. The problem is that when a ref is
151 deleted that is the target of an edge, there is not enough
152 information to efficiently find the source of the edge and
153 delete the edge. This leaves a dangling reference that may
154 cause problems.
155
156 b) If def-use chains AND use-def chains are built, this may
157 produce unexpected results. The problem is that the incremental
158 scanning of an insn does not know how to repair the chains that
159 point into an insn when the insn changes. So the incremental
160 scanning just deletes the chains that enter and exit the insn
161 being changed. The dangling reference issue in (a) is not a
162 problem here, but if the pass is depending on the chains being
163 maintained after insns have been modified, this technique will
164 not do the correct thing.
165
166 c) If the pass modifies insns several times, this incremental
167 updating may be expensive.
168
169 d) If the pass modifies all of the insns, as does register
170 allocation, it is simply better to rescan the entire function.
171
172 2) Deferred rescanning - Calls to df_insn_rescan, df_notes_rescan, and
173 df_insn_delete do not immediately change the insn but instead make
174 a note that the insn needs to be rescanned. The next call to
175 df_analyze, df_finish_pass, or df_process_deferred_rescans will
176 cause all of the pending rescans to be processed.
177
178 This is the technique of choice if either 1a, 1b, or 1c are issues
179 in the pass. In the case of 1a or 1b, a call to df_finish_pass
180 (either manually or via TODO_df_finish) should be made before the
181 next call to df_analyze or df_process_deferred_rescans.
182
183 This mode is also used by a few passes that still rely on note_uses,
184 note_stores and for_each_rtx instead of using the DF data. This
185 can be said to fall under case 1c.
186
187 To enable this mode, call df_set_flags (DF_DEFER_INSN_RESCAN).
188 (This mode can be cleared by calling df_clear_flags
189 (DF_DEFER_INSN_RESCAN) but this does not cause the deferred insns to
190 be rescanned.
191
192 3) Total rescanning - In this mode the rescanning is disabled.
193 Only when insns are deleted is the df information associated with
194 it also deleted. At the end of the pass, a call must be made to
195 df_insn_rescan_all. This method is used by the register allocator
196 since it generally changes each insn multiple times (once for each ref)
197 and does not need to make use of the updated scanning information.
198
199 4) Do it yourself - In this mechanism, the pass updates the insns
200 itself using the low level df primitives. Currently no pass does
201 this, but it has the advantage that it is quite efficient given
202 that the pass generally has exact knowledge of what it is changing.
203
204 DATA STRUCTURES
205
206 Scanning produces a `struct df_ref' data structure (ref) is allocated
207 for every register reference (def or use) and this records the insn
208 and bb the ref is found within. The refs are linked together in
209 chains of uses and defs for each insn and for each register. Each ref
210 also has a chain field that links all the use refs for a def or all
211 the def refs for a use. This is used to create use-def or def-use
212 chains.
213
214 Different optimizations have different needs. Ultimately, only
215 register allocation and schedulers should be using the bitmaps
216 produced for the live register and uninitialized register problems.
217 The rest of the backend should be upgraded to using and maintaining
218 the linked information such as def use or use def chains.
219
220
221 PHILOSOPHY:
222
223 While incremental bitmaps are not worthwhile to maintain, incremental
224 chains may be perfectly reasonable. The fastest way to build chains
225 from scratch or after significant modifications is to build reaching
226 definitions (RD) and build the chains from this.
227
228 However, general algorithms for maintaining use-def or def-use chains
229 are not practical. The amount of work to recompute the chain any
230 chain after an arbitrary change is large. However, with a modest
231 amount of work it is generally possible to have the application that
232 uses the chains keep them up to date. The high level knowledge of
233 what is really happening is essential to crafting efficient
234 incremental algorithms.
235
236 As for the bit vector problems, there is no interface to give a set of
237 blocks over with to resolve the iteration. In general, restarting a
238 dataflow iteration is difficult and expensive. Again, the best way to
239 keep the dataflow information up to data (if this is really what is
240 needed) it to formulate a problem specific solution.
241
242 There are fine grained calls for creating and deleting references from
243 instructions in df-scan.c. However, these are not currently connected
244 to the engine that resolves the dataflow equations.
245
246
247 DATA STRUCTURES:
248
249 The basic object is a DF_REF (reference) and this may either be a
250 DEF (definition) or a USE of a register.
251
252 These are linked into a variety of lists; namely reg-def, reg-use,
253 insn-def, insn-use, def-use, and use-def lists. For example, the
254 reg-def lists contain all the locations that define a given register
255 while the insn-use lists contain all the locations that use a
256 register.
257
258 Note that the reg-def and reg-use chains are generally short for
259 pseudos and long for the hard registers.
260
261 ACCESSING INSNS:
262
263 1) The df insn information is kept in an array of DF_INSN_INFO objects.
264 The array is indexed by insn uid, and every DF_REF points to the
265 DF_INSN_INFO object of the insn that contains the reference.
266
267 2) Each insn has three sets of refs, which are linked into one of three
268 lists: The insn's defs list (accessed by the DF_INSN_INFO_DEFS,
269 DF_INSN_DEFS, or DF_INSN_UID_DEFS macros), the insn's uses list
270 (accessed by the DF_INSN_INFO_USES, DF_INSN_USES, or
271 DF_INSN_UID_USES macros) or the insn's eq_uses list (accessed by the
272 DF_INSN_INFO_EQ_USES, DF_INSN_EQ_USES or DF_INSN_UID_EQ_USES macros).
273 The latter list are the list of references in REG_EQUAL or REG_EQUIV
274 notes. These macros produce a ref (or NULL), the rest of the list
275 can be obtained by traversal of the NEXT_REF field (accessed by the
276 DF_REF_NEXT_REF macro.) There is no significance to the ordering of
277 the uses or refs in an instruction.
278
279 3) Each insn has a logical uid field (LUID) which is stored in the
280 DF_INSN_INFO object for the insn. The LUID field is accessed by
281 the DF_INSN_INFO_LUID, DF_INSN_LUID, and DF_INSN_UID_LUID macros.
282 When properly set, the LUID is an integer that numbers each insn in
283 the basic block, in order from the start of the block.
284 The numbers are only correct after a call to df_analyze. They will
285 rot after insns are added deleted or moved round.
286
287 ACCESSING REFS:
288
289 There are 4 ways to obtain access to refs:
290
291 1) References are divided into two categories, REAL and ARTIFICIAL.
292
293 REAL refs are associated with instructions.
294
295 ARTIFICIAL refs are associated with basic blocks. The heads of
296 these lists can be accessed by calling df_get_artificial_defs or
297 df_get_artificial_uses for the particular basic block.
298
299 Artificial defs and uses occur both at the beginning and ends of blocks.
300
301 For blocks that area at the destination of eh edges, the
302 artificial uses and defs occur at the beginning. The defs relate
303 to the registers specified in EH_RETURN_DATA_REGNO and the uses
304 relate to the registers specified in ED_USES. Logically these
305 defs and uses should really occur along the eh edge, but there is
306 no convenient way to do this. Artificial edges that occur at the
307 beginning of the block have the DF_REF_AT_TOP flag set.
308
309 Artificial uses occur at the end of all blocks. These arise from
310 the hard registers that are always live, such as the stack
311 register and are put there to keep the code from forgetting about
312 them.
313
314 Artificial defs occur at the end of the entry block. These arise
315 from registers that are live at entry to the function.
316
317 2) There are three types of refs: defs, uses and eq_uses. (Eq_uses are
318 uses that appear inside a REG_EQUAL or REG_EQUIV note.)
319
320 All of the eq_uses, uses and defs associated with each pseudo or
321 hard register may be linked in a bidirectional chain. These are
322 called reg-use or reg_def chains. If the changeable flag
323 DF_EQ_NOTES is set when the chains are built, the eq_uses will be
324 treated like uses. If it is not set they are ignored.
325
326 The first use, eq_use or def for a register can be obtained using
327 the DF_REG_USE_CHAIN, DF_REG_EQ_USE_CHAIN or DF_REG_DEF_CHAIN
328 macros. Subsequent uses for the same regno can be obtained by
329 following the next_reg field of the ref. The number of elements in
330 each of the chains can be found by using the DF_REG_USE_COUNT,
331 DF_REG_EQ_USE_COUNT or DF_REG_DEF_COUNT macros.
332
333 In previous versions of this code, these chains were ordered. It
334 has not been practical to continue this practice.
335
336 3) If def-use or use-def chains are built, these can be traversed to
337 get to other refs. If the flag DF_EQ_NOTES has been set, the chains
338 include the eq_uses. Otherwise these are ignored when building the
339 chains.
340
341 4) An array of all of the uses (and an array of all of the defs) can
342 be built. These arrays are indexed by the value in the id
343 structure. These arrays are only lazily kept up to date, and that
344 process can be expensive. To have these arrays built, call
345 df_reorganize_defs or df_reorganize_uses. If the flag DF_EQ_NOTES
346 has been set the array will contain the eq_uses. Otherwise these
347 are ignored when building the array and assigning the ids. Note
348 that the values in the id field of a ref may change across calls to
349 df_analyze or df_reorganize_defs or df_reorganize_uses.
350
351 If the only use of this array is to find all of the refs, it is
352 better to traverse all of the registers and then traverse all of
353 reg-use or reg-def chains.
354
355 NOTES:
356
357 Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
358 both a use and a def. These are both marked read/write to show that they
359 are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
360 will generate a use of reg 42 followed by a def of reg 42 (both marked
361 read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
362 generates a use of reg 41 then a def of reg 41 (both marked read/write),
363 even though reg 41 is decremented before it is used for the memory
364 address in this second example.
365
366 A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
367 for which the number of word_mode units covered by the outer mode is
368 smaller than that covered by the inner mode, invokes a read-modify-write
369 operation. We generate both a use and a def and again mark them
370 read/write.
371
372 Paradoxical subreg writes do not leave a trace of the old content, so they
373 are write-only operations.
374 */
375
376
377 #include "config.h"
378 #include "system.h"
379 #include "coretypes.h"
380 #include "tm.h"
381 #include "rtl.h"
382 #include "tm_p.h"
383 #include "insn-config.h"
384 #include "recog.h"
385 #include "function.h"
386 #include "regs.h"
387 #include "alloc-pool.h"
388 #include "flags.h"
389 #include "hard-reg-set.h"
390 #include "basic-block.h"
391 #include "sbitmap.h"
392 #include "bitmap.h"
393 #include "df.h"
394 #include "tree-pass.h"
395 #include "params.h"
396 #include "cfgloop.h"
397
398 static void *df_get_bb_info (struct dataflow *, unsigned int);
399 static void df_set_bb_info (struct dataflow *, unsigned int, void *);
400 static void df_clear_bb_info (struct dataflow *, unsigned int);
401 #ifdef DF_DEBUG_CFG
402 static void df_set_clean_cfg (void);
403 #endif
404
405 /* The obstack on which regsets are allocated. */
406 struct bitmap_obstack reg_obstack;
407
408 /* An obstack for bitmap not related to specific dataflow problems.
409 This obstack should e.g. be used for bitmaps with a short life time
410 such as temporary bitmaps. */
411
412 bitmap_obstack df_bitmap_obstack;
413
414
415 /*----------------------------------------------------------------------------
416 Functions to create, destroy and manipulate an instance of df.
417 ----------------------------------------------------------------------------*/
418
419 struct df_d *df;
420
421 /* Add PROBLEM (and any dependent problems) to the DF instance. */
422
423 void
424 df_add_problem (struct df_problem *problem)
425 {
426 struct dataflow *dflow;
427 int i;
428
429 /* First try to add the dependent problem. */
430 if (problem->dependent_problem)
431 df_add_problem (problem->dependent_problem);
432
433 /* Check to see if this problem has already been defined. If it
434 has, just return that instance, if not, add it to the end of the
435 vector. */
436 dflow = df->problems_by_index[problem->id];
437 if (dflow)
438 return;
439
440 /* Make a new one and add it to the end. */
441 dflow = XCNEW (struct dataflow);
442 dflow->problem = problem;
443 dflow->computed = false;
444 dflow->solutions_dirty = true;
445 df->problems_by_index[dflow->problem->id] = dflow;
446
447 /* Keep the defined problems ordered by index. This solves the
448 problem that RI will use the information from UREC if UREC has
449 been defined, or from LIVE if LIVE is defined and otherwise LR.
450 However for this to work, the computation of RI must be pushed
451 after which ever of those problems is defined, but we do not
452 require any of those except for LR to have actually been
453 defined. */
454 df->num_problems_defined++;
455 for (i = df->num_problems_defined - 2; i >= 0; i--)
456 {
457 if (problem->id < df->problems_in_order[i]->problem->id)
458 df->problems_in_order[i+1] = df->problems_in_order[i];
459 else
460 {
461 df->problems_in_order[i+1] = dflow;
462 return;
463 }
464 }
465 df->problems_in_order[0] = dflow;
466 }
467
468
469 /* Set the MASK flags in the DFLOW problem. The old flags are
470 returned. If a flag is not allowed to be changed this will fail if
471 checking is enabled. */
472 int
473 df_set_flags (int changeable_flags)
474 {
475 int old_flags = df->changeable_flags;
476 df->changeable_flags |= changeable_flags;
477 return old_flags;
478 }
479
480
481 /* Clear the MASK flags in the DFLOW problem. The old flags are
482 returned. If a flag is not allowed to be changed this will fail if
483 checking is enabled. */
484 int
485 df_clear_flags (int changeable_flags)
486 {
487 int old_flags = df->changeable_flags;
488 df->changeable_flags &= ~changeable_flags;
489 return old_flags;
490 }
491
492
493 /* Set the blocks that are to be considered for analysis. If this is
494 not called or is called with null, the entire function in
495 analyzed. */
496
497 void
498 df_set_blocks (bitmap blocks)
499 {
500 if (blocks)
501 {
502 if (dump_file)
503 bitmap_print (dump_file, blocks, "setting blocks to analyze ", "\n");
504 if (df->blocks_to_analyze)
505 {
506 /* This block is called to change the focus from one subset
507 to another. */
508 int p;
509 bitmap_head diff;
510 bitmap_initialize (&diff, &df_bitmap_obstack);
511 bitmap_and_compl (&diff, df->blocks_to_analyze, blocks);
512 for (p = 0; p < df->num_problems_defined; p++)
513 {
514 struct dataflow *dflow = df->problems_in_order[p];
515 if (dflow->optional_p && dflow->problem->reset_fun)
516 dflow->problem->reset_fun (df->blocks_to_analyze);
517 else if (dflow->problem->free_blocks_on_set_blocks)
518 {
519 bitmap_iterator bi;
520 unsigned int bb_index;
521
522 EXECUTE_IF_SET_IN_BITMAP (&diff, 0, bb_index, bi)
523 {
524 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
525 if (bb)
526 {
527 void *bb_info = df_get_bb_info (dflow, bb_index);
528 dflow->problem->free_bb_fun (bb, bb_info);
529 df_clear_bb_info (dflow, bb_index);
530 }
531 }
532 }
533 }
534
535 bitmap_clear (&diff);
536 }
537 else
538 {
539 /* This block of code is executed to change the focus from
540 the entire function to a subset. */
541 bitmap_head blocks_to_reset;
542 bool initialized = false;
543 int p;
544 for (p = 0; p < df->num_problems_defined; p++)
545 {
546 struct dataflow *dflow = df->problems_in_order[p];
547 if (dflow->optional_p && dflow->problem->reset_fun)
548 {
549 if (!initialized)
550 {
551 basic_block bb;
552 bitmap_initialize (&blocks_to_reset, &df_bitmap_obstack);
553 FOR_ALL_BB_FN (bb, cfun)
554 {
555 bitmap_set_bit (&blocks_to_reset, bb->index);
556 }
557 }
558 dflow->problem->reset_fun (&blocks_to_reset);
559 }
560 }
561 if (initialized)
562 bitmap_clear (&blocks_to_reset);
563
564 df->blocks_to_analyze = BITMAP_ALLOC (&df_bitmap_obstack);
565 }
566 bitmap_copy (df->blocks_to_analyze, blocks);
567 df->analyze_subset = true;
568 }
569 else
570 {
571 /* This block is executed to reset the focus to the entire
572 function. */
573 if (dump_file)
574 fprintf (dump_file, "clearing blocks_to_analyze\n");
575 if (df->blocks_to_analyze)
576 {
577 BITMAP_FREE (df->blocks_to_analyze);
578 df->blocks_to_analyze = NULL;
579 }
580 df->analyze_subset = false;
581 }
582
583 /* Setting the blocks causes the refs to be unorganized since only
584 the refs in the blocks are seen. */
585 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
586 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
587 df_mark_solutions_dirty ();
588 }
589
590
591 /* Delete a DFLOW problem (and any problems that depend on this
592 problem). */
593
594 void
595 df_remove_problem (struct dataflow *dflow)
596 {
597 struct df_problem *problem;
598 int i;
599
600 if (!dflow)
601 return;
602
603 problem = dflow->problem;
604 gcc_assert (problem->remove_problem_fun);
605
606 /* Delete any problems that depended on this problem first. */
607 for (i = 0; i < df->num_problems_defined; i++)
608 if (df->problems_in_order[i]->problem->dependent_problem == problem)
609 df_remove_problem (df->problems_in_order[i]);
610
611 /* Now remove this problem. */
612 for (i = 0; i < df->num_problems_defined; i++)
613 if (df->problems_in_order[i] == dflow)
614 {
615 int j;
616 for (j = i + 1; j < df->num_problems_defined; j++)
617 df->problems_in_order[j-1] = df->problems_in_order[j];
618 df->problems_in_order[j-1] = NULL;
619 df->num_problems_defined--;
620 break;
621 }
622
623 (problem->remove_problem_fun) ();
624 df->problems_by_index[problem->id] = NULL;
625 }
626
627
628 /* Remove all of the problems that are not permanent. Scanning, LR
629 and (at -O2 or higher) LIVE are permanent, the rest are removable.
630 Also clear all of the changeable_flags. */
631
632 void
633 df_finish_pass (bool verify ATTRIBUTE_UNUSED)
634 {
635 int i;
636 int removed = 0;
637
638 #ifdef ENABLE_DF_CHECKING
639 int saved_flags;
640 #endif
641
642 if (!df)
643 return;
644
645 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
646 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
647
648 #ifdef ENABLE_DF_CHECKING
649 saved_flags = df->changeable_flags;
650 #endif
651
652 for (i = 0; i < df->num_problems_defined; i++)
653 {
654 struct dataflow *dflow = df->problems_in_order[i];
655 struct df_problem *problem = dflow->problem;
656
657 if (dflow->optional_p)
658 {
659 gcc_assert (problem->remove_problem_fun);
660 (problem->remove_problem_fun) ();
661 df->problems_in_order[i] = NULL;
662 df->problems_by_index[problem->id] = NULL;
663 removed++;
664 }
665 }
666 df->num_problems_defined -= removed;
667
668 /* Clear all of the flags. */
669 df->changeable_flags = 0;
670 df_process_deferred_rescans ();
671
672 /* Set the focus back to the whole function. */
673 if (df->blocks_to_analyze)
674 {
675 BITMAP_FREE (df->blocks_to_analyze);
676 df->blocks_to_analyze = NULL;
677 df_mark_solutions_dirty ();
678 df->analyze_subset = false;
679 }
680
681 #ifdef ENABLE_DF_CHECKING
682 /* Verification will fail in DF_NO_INSN_RESCAN. */
683 if (!(saved_flags & DF_NO_INSN_RESCAN))
684 {
685 df_lr_verify_transfer_functions ();
686 if (df_live)
687 df_live_verify_transfer_functions ();
688 }
689
690 #ifdef DF_DEBUG_CFG
691 df_set_clean_cfg ();
692 #endif
693 #endif
694
695 #ifdef ENABLE_CHECKING
696 if (verify)
697 df->changeable_flags |= DF_VERIFY_SCHEDULED;
698 #endif
699 }
700
701
702 /* Set up the dataflow instance for the entire back end. */
703
704 static unsigned int
705 rest_of_handle_df_initialize (void)
706 {
707 gcc_assert (!df);
708 df = XCNEW (struct df_d);
709 df->changeable_flags = 0;
710
711 bitmap_obstack_initialize (&df_bitmap_obstack);
712
713 /* Set this to a conservative value. Stack_ptr_mod will compute it
714 correctly later. */
715 crtl->sp_is_unchanging = 0;
716
717 df_scan_add_problem ();
718 df_scan_alloc (NULL);
719
720 /* These three problems are permanent. */
721 df_lr_add_problem ();
722 if (optimize > 1)
723 df_live_add_problem ();
724
725 df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
726 df->postorder_inverted = XNEWVEC (int, last_basic_block_for_fn (cfun));
727 df->n_blocks = post_order_compute (df->postorder, true, true);
728 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
729 gcc_assert (df->n_blocks == df->n_blocks_inverted);
730
731 df->hard_regs_live_count = XCNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER);
732
733 df_hard_reg_init ();
734 /* After reload, some ports add certain bits to regs_ever_live so
735 this cannot be reset. */
736 df_compute_regs_ever_live (true);
737 df_scan_blocks ();
738 df_compute_regs_ever_live (false);
739 return 0;
740 }
741
742
743 static bool
744 gate_opt (void)
745 {
746 return optimize > 0;
747 }
748
749
750 namespace {
751
752 const pass_data pass_data_df_initialize_opt =
753 {
754 RTL_PASS, /* type */
755 "dfinit", /* name */
756 OPTGROUP_NONE, /* optinfo_flags */
757 true, /* has_execute */
758 TV_DF_SCAN, /* tv_id */
759 0, /* properties_required */
760 0, /* properties_provided */
761 0, /* properties_destroyed */
762 0, /* todo_flags_start */
763 0, /* todo_flags_finish */
764 };
765
766 class pass_df_initialize_opt : public rtl_opt_pass
767 {
768 public:
769 pass_df_initialize_opt (gcc::context *ctxt)
770 : rtl_opt_pass (pass_data_df_initialize_opt, ctxt)
771 {}
772
773 /* opt_pass methods: */
774 bool gate () { return gate_opt (); }
775 unsigned int execute () { return rest_of_handle_df_initialize (); }
776
777 }; // class pass_df_initialize_opt
778
779 } // anon namespace
780
781 rtl_opt_pass *
782 make_pass_df_initialize_opt (gcc::context *ctxt)
783 {
784 return new pass_df_initialize_opt (ctxt);
785 }
786
787
788 static bool
789 gate_no_opt (void)
790 {
791 return optimize == 0;
792 }
793
794
795 namespace {
796
797 const pass_data pass_data_df_initialize_no_opt =
798 {
799 RTL_PASS, /* type */
800 "no-opt dfinit", /* name */
801 OPTGROUP_NONE, /* optinfo_flags */
802 true, /* has_execute */
803 TV_DF_SCAN, /* tv_id */
804 0, /* properties_required */
805 0, /* properties_provided */
806 0, /* properties_destroyed */
807 0, /* todo_flags_start */
808 0, /* todo_flags_finish */
809 };
810
811 class pass_df_initialize_no_opt : public rtl_opt_pass
812 {
813 public:
814 pass_df_initialize_no_opt (gcc::context *ctxt)
815 : rtl_opt_pass (pass_data_df_initialize_no_opt, ctxt)
816 {}
817
818 /* opt_pass methods: */
819 bool gate () { return gate_no_opt (); }
820 unsigned int execute () { return rest_of_handle_df_initialize (); }
821
822 }; // class pass_df_initialize_no_opt
823
824 } // anon namespace
825
826 rtl_opt_pass *
827 make_pass_df_initialize_no_opt (gcc::context *ctxt)
828 {
829 return new pass_df_initialize_no_opt (ctxt);
830 }
831
832
833 /* Free all the dataflow info and the DF structure. This should be
834 called from the df_finish macro which also NULLs the parm. */
835
836 static unsigned int
837 rest_of_handle_df_finish (void)
838 {
839 int i;
840
841 gcc_assert (df);
842
843 for (i = 0; i < df->num_problems_defined; i++)
844 {
845 struct dataflow *dflow = df->problems_in_order[i];
846 dflow->problem->free_fun ();
847 }
848
849 free (df->postorder);
850 free (df->postorder_inverted);
851 free (df->hard_regs_live_count);
852 free (df);
853 df = NULL;
854
855 bitmap_obstack_release (&df_bitmap_obstack);
856 return 0;
857 }
858
859
860 namespace {
861
862 const pass_data pass_data_df_finish =
863 {
864 RTL_PASS, /* type */
865 "dfinish", /* name */
866 OPTGROUP_NONE, /* optinfo_flags */
867 true, /* has_execute */
868 TV_NONE, /* tv_id */
869 0, /* properties_required */
870 0, /* properties_provided */
871 0, /* properties_destroyed */
872 0, /* todo_flags_start */
873 0, /* todo_flags_finish */
874 };
875
876 class pass_df_finish : public rtl_opt_pass
877 {
878 public:
879 pass_df_finish (gcc::context *ctxt)
880 : rtl_opt_pass (pass_data_df_finish, ctxt)
881 {}
882
883 /* opt_pass methods: */
884 unsigned int execute () { return rest_of_handle_df_finish (); }
885
886 }; // class pass_df_finish
887
888 } // anon namespace
889
890 rtl_opt_pass *
891 make_pass_df_finish (gcc::context *ctxt)
892 {
893 return new pass_df_finish (ctxt);
894 }
895
896
897
898
899 \f
900 /*----------------------------------------------------------------------------
901 The general data flow analysis engine.
902 ----------------------------------------------------------------------------*/
903
904 /* Return time BB when it was visited for last time. */
905 #define BB_LAST_CHANGE_AGE(bb) ((ptrdiff_t)(bb)->aux)
906
907 /* Helper function for df_worklist_dataflow.
908 Propagate the dataflow forward.
909 Given a BB_INDEX, do the dataflow propagation
910 and set bits on for successors in PENDING
911 if the out set of the dataflow has changed.
912
913 AGE specify time when BB was visited last time.
914 AGE of 0 means we are visiting for first time and need to
915 compute transfer function to initialize datastructures.
916 Otherwise we re-do transfer function only if something change
917 while computing confluence functions.
918 We need to compute confluence only of basic block that are younger
919 then last visit of the BB.
920
921 Return true if BB info has changed. This is always the case
922 in the first visit. */
923
924 static bool
925 df_worklist_propagate_forward (struct dataflow *dataflow,
926 unsigned bb_index,
927 unsigned *bbindex_to_postorder,
928 bitmap pending,
929 sbitmap considered,
930 ptrdiff_t age)
931 {
932 edge e;
933 edge_iterator ei;
934 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
935 bool changed = !age;
936
937 /* Calculate <conf_op> of incoming edges. */
938 if (EDGE_COUNT (bb->preds) > 0)
939 FOR_EACH_EDGE (e, ei, bb->preds)
940 {
941 if (age <= BB_LAST_CHANGE_AGE (e->src)
942 && bitmap_bit_p (considered, e->src->index))
943 changed |= dataflow->problem->con_fun_n (e);
944 }
945 else if (dataflow->problem->con_fun_0)
946 dataflow->problem->con_fun_0 (bb);
947
948 if (changed
949 && dataflow->problem->trans_fun (bb_index))
950 {
951 /* The out set of this block has changed.
952 Propagate to the outgoing blocks. */
953 FOR_EACH_EDGE (e, ei, bb->succs)
954 {
955 unsigned ob_index = e->dest->index;
956
957 if (bitmap_bit_p (considered, ob_index))
958 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
959 }
960 return true;
961 }
962 return false;
963 }
964
965
966 /* Helper function for df_worklist_dataflow.
967 Propagate the dataflow backward. */
968
969 static bool
970 df_worklist_propagate_backward (struct dataflow *dataflow,
971 unsigned bb_index,
972 unsigned *bbindex_to_postorder,
973 bitmap pending,
974 sbitmap considered,
975 ptrdiff_t age)
976 {
977 edge e;
978 edge_iterator ei;
979 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
980 bool changed = !age;
981
982 /* Calculate <conf_op> of incoming edges. */
983 if (EDGE_COUNT (bb->succs) > 0)
984 FOR_EACH_EDGE (e, ei, bb->succs)
985 {
986 if (age <= BB_LAST_CHANGE_AGE (e->dest)
987 && bitmap_bit_p (considered, e->dest->index))
988 changed |= dataflow->problem->con_fun_n (e);
989 }
990 else if (dataflow->problem->con_fun_0)
991 dataflow->problem->con_fun_0 (bb);
992
993 if (changed
994 && dataflow->problem->trans_fun (bb_index))
995 {
996 /* The out set of this block has changed.
997 Propagate to the outgoing blocks. */
998 FOR_EACH_EDGE (e, ei, bb->preds)
999 {
1000 unsigned ob_index = e->src->index;
1001
1002 if (bitmap_bit_p (considered, ob_index))
1003 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
1004 }
1005 return true;
1006 }
1007 return false;
1008 }
1009
1010 /* Main dataflow solver loop.
1011
1012 DATAFLOW is problem we are solving, PENDING is worklist of basic blocks we
1013 need to visit.
1014 BLOCK_IN_POSTORDER is array of size N_BLOCKS specifying postorder in BBs and
1015 BBINDEX_TO_POSTORDER is array mapping back BB->index to postorder position.
1016 PENDING will be freed.
1017
1018 The worklists are bitmaps indexed by postorder positions.
1019
1020 The function implements standard algorithm for dataflow solving with two
1021 worklists (we are processing WORKLIST and storing new BBs to visit in
1022 PENDING).
1023
1024 As an optimization we maintain ages when BB was changed (stored in bb->aux)
1025 and when it was last visited (stored in last_visit_age). This avoids need
1026 to re-do confluence function for edges to basic blocks whose source
1027 did not change since destination was visited last time. */
1028
1029 static void
1030 df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
1031 bitmap pending,
1032 sbitmap considered,
1033 int *blocks_in_postorder,
1034 unsigned *bbindex_to_postorder,
1035 int n_blocks)
1036 {
1037 enum df_flow_dir dir = dataflow->problem->dir;
1038 int dcount = 0;
1039 bitmap worklist = BITMAP_ALLOC (&df_bitmap_obstack);
1040 int age = 0;
1041 bool changed;
1042 vec<int> last_visit_age = vNULL;
1043 int prev_age;
1044 basic_block bb;
1045 int i;
1046
1047 last_visit_age.safe_grow_cleared (n_blocks);
1048
1049 /* Double-queueing. Worklist is for the current iteration,
1050 and pending is for the next. */
1051 while (!bitmap_empty_p (pending))
1052 {
1053 bitmap_iterator bi;
1054 unsigned int index;
1055
1056 /* Swap pending and worklist. */
1057 bitmap temp = worklist;
1058 worklist = pending;
1059 pending = temp;
1060
1061 EXECUTE_IF_SET_IN_BITMAP (worklist, 0, index, bi)
1062 {
1063 unsigned bb_index;
1064 dcount++;
1065
1066 bitmap_clear_bit (pending, index);
1067 bb_index = blocks_in_postorder[index];
1068 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
1069 prev_age = last_visit_age[index];
1070 if (dir == DF_FORWARD)
1071 changed = df_worklist_propagate_forward (dataflow, bb_index,
1072 bbindex_to_postorder,
1073 pending, considered,
1074 prev_age);
1075 else
1076 changed = df_worklist_propagate_backward (dataflow, bb_index,
1077 bbindex_to_postorder,
1078 pending, considered,
1079 prev_age);
1080 last_visit_age[index] = ++age;
1081 if (changed)
1082 bb->aux = (void *)(ptrdiff_t)age;
1083 }
1084 bitmap_clear (worklist);
1085 }
1086 for (i = 0; i < n_blocks; i++)
1087 BASIC_BLOCK_FOR_FN (cfun, blocks_in_postorder[i])->aux = NULL;
1088
1089 BITMAP_FREE (worklist);
1090 BITMAP_FREE (pending);
1091 last_visit_age.release ();
1092
1093 /* Dump statistics. */
1094 if (dump_file)
1095 fprintf (dump_file, "df_worklist_dataflow_doublequeue:"
1096 "n_basic_blocks %d n_edges %d"
1097 " count %d (%5.2g)\n",
1098 n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun),
1099 dcount, dcount / (float)n_basic_blocks_for_fn (cfun));
1100 }
1101
1102 /* Worklist-based dataflow solver. It uses sbitmap as a worklist,
1103 with "n"-th bit representing the n-th block in the reverse-postorder order.
1104 The solver is a double-queue algorithm similar to the "double stack" solver
1105 from Cooper, Harvey and Kennedy, "Iterative data-flow analysis, Revisited".
1106 The only significant difference is that the worklist in this implementation
1107 is always sorted in RPO of the CFG visiting direction. */
1108
1109 void
1110 df_worklist_dataflow (struct dataflow *dataflow,
1111 bitmap blocks_to_consider,
1112 int *blocks_in_postorder,
1113 int n_blocks)
1114 {
1115 bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
1116 sbitmap considered = sbitmap_alloc (last_basic_block_for_fn (cfun));
1117 bitmap_iterator bi;
1118 unsigned int *bbindex_to_postorder;
1119 int i;
1120 unsigned int index;
1121 enum df_flow_dir dir = dataflow->problem->dir;
1122
1123 gcc_assert (dir != DF_NONE);
1124
1125 /* BBINDEX_TO_POSTORDER maps the bb->index to the reverse postorder. */
1126 bbindex_to_postorder = XNEWVEC (unsigned int,
1127 last_basic_block_for_fn (cfun));
1128
1129 /* Initialize the array to an out-of-bound value. */
1130 for (i = 0; i < last_basic_block_for_fn (cfun); i++)
1131 bbindex_to_postorder[i] = last_basic_block_for_fn (cfun);
1132
1133 /* Initialize the considered map. */
1134 bitmap_clear (considered);
1135 EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, index, bi)
1136 {
1137 bitmap_set_bit (considered, index);
1138 }
1139
1140 /* Initialize the mapping of block index to postorder. */
1141 for (i = 0; i < n_blocks; i++)
1142 {
1143 bbindex_to_postorder[blocks_in_postorder[i]] = i;
1144 /* Add all blocks to the worklist. */
1145 bitmap_set_bit (pending, i);
1146 }
1147
1148 /* Initialize the problem. */
1149 if (dataflow->problem->init_fun)
1150 dataflow->problem->init_fun (blocks_to_consider);
1151
1152 /* Solve it. */
1153 df_worklist_dataflow_doublequeue (dataflow, pending, considered,
1154 blocks_in_postorder,
1155 bbindex_to_postorder,
1156 n_blocks);
1157 sbitmap_free (considered);
1158 free (bbindex_to_postorder);
1159 }
1160
1161
1162 /* Remove the entries not in BLOCKS from the LIST of length LEN, preserving
1163 the order of the remaining entries. Returns the length of the resulting
1164 list. */
1165
1166 static unsigned
1167 df_prune_to_subcfg (int list[], unsigned len, bitmap blocks)
1168 {
1169 unsigned act, last;
1170
1171 for (act = 0, last = 0; act < len; act++)
1172 if (bitmap_bit_p (blocks, list[act]))
1173 list[last++] = list[act];
1174
1175 return last;
1176 }
1177
1178
1179 /* Execute dataflow analysis on a single dataflow problem.
1180
1181 BLOCKS_TO_CONSIDER are the blocks whose solution can either be
1182 examined or will be computed. For calls from DF_ANALYZE, this is
1183 the set of blocks that has been passed to DF_SET_BLOCKS.
1184 */
1185
1186 void
1187 df_analyze_problem (struct dataflow *dflow,
1188 bitmap blocks_to_consider,
1189 int *postorder, int n_blocks)
1190 {
1191 timevar_push (dflow->problem->tv_id);
1192
1193 /* (Re)Allocate the datastructures necessary to solve the problem. */
1194 if (dflow->problem->alloc_fun)
1195 dflow->problem->alloc_fun (blocks_to_consider);
1196
1197 #ifdef ENABLE_DF_CHECKING
1198 if (dflow->problem->verify_start_fun)
1199 dflow->problem->verify_start_fun ();
1200 #endif
1201
1202 /* Set up the problem and compute the local information. */
1203 if (dflow->problem->local_compute_fun)
1204 dflow->problem->local_compute_fun (blocks_to_consider);
1205
1206 /* Solve the equations. */
1207 if (dflow->problem->dataflow_fun)
1208 dflow->problem->dataflow_fun (dflow, blocks_to_consider,
1209 postorder, n_blocks);
1210
1211 /* Massage the solution. */
1212 if (dflow->problem->finalize_fun)
1213 dflow->problem->finalize_fun (blocks_to_consider);
1214
1215 #ifdef ENABLE_DF_CHECKING
1216 if (dflow->problem->verify_end_fun)
1217 dflow->problem->verify_end_fun ();
1218 #endif
1219
1220 timevar_pop (dflow->problem->tv_id);
1221
1222 dflow->computed = true;
1223 }
1224
1225
1226 /* Analyze dataflow info. */
1227
1228 static void
1229 df_analyze_1 (void)
1230 {
1231 int i;
1232
1233 /* These should be the same. */
1234 gcc_assert (df->n_blocks == df->n_blocks_inverted);
1235
1236 /* We need to do this before the df_verify_all because this is
1237 not kept incrementally up to date. */
1238 df_compute_regs_ever_live (false);
1239 df_process_deferred_rescans ();
1240
1241 if (dump_file)
1242 fprintf (dump_file, "df_analyze called\n");
1243
1244 #ifndef ENABLE_DF_CHECKING
1245 if (df->changeable_flags & DF_VERIFY_SCHEDULED)
1246 #endif
1247 df_verify ();
1248
1249 /* Skip over the DF_SCAN problem. */
1250 for (i = 1; i < df->num_problems_defined; i++)
1251 {
1252 struct dataflow *dflow = df->problems_in_order[i];
1253 if (dflow->solutions_dirty)
1254 {
1255 if (dflow->problem->dir == DF_FORWARD)
1256 df_analyze_problem (dflow,
1257 df->blocks_to_analyze,
1258 df->postorder_inverted,
1259 df->n_blocks_inverted);
1260 else
1261 df_analyze_problem (dflow,
1262 df->blocks_to_analyze,
1263 df->postorder,
1264 df->n_blocks);
1265 }
1266 }
1267
1268 if (!df->analyze_subset)
1269 {
1270 BITMAP_FREE (df->blocks_to_analyze);
1271 df->blocks_to_analyze = NULL;
1272 }
1273
1274 #ifdef DF_DEBUG_CFG
1275 df_set_clean_cfg ();
1276 #endif
1277 }
1278
1279 /* Analyze dataflow info. */
1280
1281 void
1282 df_analyze (void)
1283 {
1284 bitmap current_all_blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1285 int i;
1286
1287 free (df->postorder);
1288 free (df->postorder_inverted);
1289 df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
1290 df->postorder_inverted = XNEWVEC (int, last_basic_block_for_fn (cfun));
1291 df->n_blocks = post_order_compute (df->postorder, true, true);
1292 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
1293
1294 for (i = 0; i < df->n_blocks; i++)
1295 bitmap_set_bit (current_all_blocks, df->postorder[i]);
1296
1297 #ifdef ENABLE_CHECKING
1298 /* Verify that POSTORDER_INVERTED only contains blocks reachable from
1299 the ENTRY block. */
1300 for (i = 0; i < df->n_blocks_inverted; i++)
1301 gcc_assert (bitmap_bit_p (current_all_blocks, df->postorder_inverted[i]));
1302 #endif
1303
1304 /* Make sure that we have pruned any unreachable blocks from these
1305 sets. */
1306 if (df->analyze_subset)
1307 {
1308 bitmap_and_into (df->blocks_to_analyze, current_all_blocks);
1309 df->n_blocks = df_prune_to_subcfg (df->postorder,
1310 df->n_blocks, df->blocks_to_analyze);
1311 df->n_blocks_inverted = df_prune_to_subcfg (df->postorder_inverted,
1312 df->n_blocks_inverted,
1313 df->blocks_to_analyze);
1314 BITMAP_FREE (current_all_blocks);
1315 }
1316 else
1317 {
1318 df->blocks_to_analyze = current_all_blocks;
1319 current_all_blocks = NULL;
1320 }
1321
1322 df_analyze_1 ();
1323 }
1324
1325 /* Compute the reverse top sort order of the sub-CFG specified by LOOP.
1326 Returns the number of blocks which is always loop->num_nodes. */
1327
1328 static int
1329 loop_post_order_compute (int *post_order, struct loop *loop)
1330 {
1331 edge_iterator *stack;
1332 int sp;
1333 int post_order_num = 0;
1334 bitmap visited;
1335
1336 /* Allocate stack for back-tracking up CFG. */
1337 stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
1338 sp = 0;
1339
1340 /* Allocate bitmap to track nodes that have been visited. */
1341 visited = BITMAP_ALLOC (NULL);
1342
1343 /* Push the first edge on to the stack. */
1344 stack[sp++] = ei_start (loop_preheader_edge (loop)->src->succs);
1345
1346 while (sp)
1347 {
1348 edge_iterator ei;
1349 basic_block src;
1350 basic_block dest;
1351
1352 /* Look at the edge on the top of the stack. */
1353 ei = stack[sp - 1];
1354 src = ei_edge (ei)->src;
1355 dest = ei_edge (ei)->dest;
1356
1357 /* Check if the edge destination has been visited yet and mark it
1358 if not so. */
1359 if (flow_bb_inside_loop_p (loop, dest)
1360 && bitmap_set_bit (visited, dest->index))
1361 {
1362 if (EDGE_COUNT (dest->succs) > 0)
1363 /* Since the DEST node has been visited for the first
1364 time, check its successors. */
1365 stack[sp++] = ei_start (dest->succs);
1366 else
1367 post_order[post_order_num++] = dest->index;
1368 }
1369 else
1370 {
1371 if (ei_one_before_end_p (ei)
1372 && src != loop_preheader_edge (loop)->src)
1373 post_order[post_order_num++] = src->index;
1374
1375 if (!ei_one_before_end_p (ei))
1376 ei_next (&stack[sp - 1]);
1377 else
1378 sp--;
1379 }
1380 }
1381
1382 free (stack);
1383 BITMAP_FREE (visited);
1384
1385 return post_order_num;
1386 }
1387
1388 /* Compute the reverse top sort order of the inverted sub-CFG specified
1389 by LOOP. Returns the number of blocks which is always loop->num_nodes. */
1390
1391 static int
1392 loop_inverted_post_order_compute (int *post_order, struct loop *loop)
1393 {
1394 basic_block bb;
1395 edge_iterator *stack;
1396 int sp;
1397 int post_order_num = 0;
1398 bitmap visited;
1399
1400 /* Allocate stack for back-tracking up CFG. */
1401 stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
1402 sp = 0;
1403
1404 /* Allocate bitmap to track nodes that have been visited. */
1405 visited = BITMAP_ALLOC (NULL);
1406
1407 /* Put all latches into the initial work list. In theory we'd want
1408 to start from loop exits but then we'd have the special case of
1409 endless loops. It doesn't really matter for DF iteration order and
1410 handling latches last is probably even better. */
1411 stack[sp++] = ei_start (loop->header->preds);
1412 bitmap_set_bit (visited, loop->header->index);
1413
1414 /* The inverted traversal loop. */
1415 while (sp)
1416 {
1417 edge_iterator ei;
1418 basic_block pred;
1419
1420 /* Look at the edge on the top of the stack. */
1421 ei = stack[sp - 1];
1422 bb = ei_edge (ei)->dest;
1423 pred = ei_edge (ei)->src;
1424
1425 /* Check if the predecessor has been visited yet and mark it
1426 if not so. */
1427 if (flow_bb_inside_loop_p (loop, pred)
1428 && bitmap_set_bit (visited, pred->index))
1429 {
1430 if (EDGE_COUNT (pred->preds) > 0)
1431 /* Since the predecessor node has been visited for the first
1432 time, check its predecessors. */
1433 stack[sp++] = ei_start (pred->preds);
1434 else
1435 post_order[post_order_num++] = pred->index;
1436 }
1437 else
1438 {
1439 if (flow_bb_inside_loop_p (loop, bb)
1440 && ei_one_before_end_p (ei))
1441 post_order[post_order_num++] = bb->index;
1442
1443 if (!ei_one_before_end_p (ei))
1444 ei_next (&stack[sp - 1]);
1445 else
1446 sp--;
1447 }
1448 }
1449
1450 free (stack);
1451 BITMAP_FREE (visited);
1452 return post_order_num;
1453 }
1454
1455
1456 /* Analyze dataflow info for the basic blocks contained in LOOP. */
1457
1458 void
1459 df_analyze_loop (struct loop *loop)
1460 {
1461 free (df->postorder);
1462 free (df->postorder_inverted);
1463
1464 df->postorder = XNEWVEC (int, loop->num_nodes);
1465 df->postorder_inverted = XNEWVEC (int, loop->num_nodes);
1466 df->n_blocks = loop_post_order_compute (df->postorder, loop);
1467 df->n_blocks_inverted
1468 = loop_inverted_post_order_compute (df->postorder_inverted, loop);
1469 gcc_assert ((unsigned) df->n_blocks == loop->num_nodes);
1470 gcc_assert ((unsigned) df->n_blocks_inverted == loop->num_nodes);
1471
1472 bitmap blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1473 for (int i = 0; i < df->n_blocks; ++i)
1474 bitmap_set_bit (blocks, df->postorder[i]);
1475 df_set_blocks (blocks);
1476 BITMAP_FREE (blocks);
1477
1478 df_analyze_1 ();
1479 }
1480
1481
1482 /* Return the number of basic blocks from the last call to df_analyze. */
1483
1484 int
1485 df_get_n_blocks (enum df_flow_dir dir)
1486 {
1487 gcc_assert (dir != DF_NONE);
1488
1489 if (dir == DF_FORWARD)
1490 {
1491 gcc_assert (df->postorder_inverted);
1492 return df->n_blocks_inverted;
1493 }
1494
1495 gcc_assert (df->postorder);
1496 return df->n_blocks;
1497 }
1498
1499
1500 /* Return a pointer to the array of basic blocks in the reverse postorder.
1501 Depending on the direction of the dataflow problem,
1502 it returns either the usual reverse postorder array
1503 or the reverse postorder of inverted traversal. */
1504 int *
1505 df_get_postorder (enum df_flow_dir dir)
1506 {
1507 gcc_assert (dir != DF_NONE);
1508
1509 if (dir == DF_FORWARD)
1510 {
1511 gcc_assert (df->postorder_inverted);
1512 return df->postorder_inverted;
1513 }
1514 gcc_assert (df->postorder);
1515 return df->postorder;
1516 }
1517
1518 static struct df_problem user_problem;
1519 static struct dataflow user_dflow;
1520
1521 /* Interface for calling iterative dataflow with user defined
1522 confluence and transfer functions. All that is necessary is to
1523 supply DIR, a direction, CONF_FUN_0, a confluence function for
1524 blocks with no logical preds (or NULL), CONF_FUN_N, the normal
1525 confluence function, TRANS_FUN, the basic block transfer function,
1526 and BLOCKS, the set of blocks to examine, POSTORDER the blocks in
1527 postorder, and N_BLOCKS, the number of blocks in POSTORDER. */
1528
1529 void
1530 df_simple_dataflow (enum df_flow_dir dir,
1531 df_init_function init_fun,
1532 df_confluence_function_0 con_fun_0,
1533 df_confluence_function_n con_fun_n,
1534 df_transfer_function trans_fun,
1535 bitmap blocks, int * postorder, int n_blocks)
1536 {
1537 memset (&user_problem, 0, sizeof (struct df_problem));
1538 user_problem.dir = dir;
1539 user_problem.init_fun = init_fun;
1540 user_problem.con_fun_0 = con_fun_0;
1541 user_problem.con_fun_n = con_fun_n;
1542 user_problem.trans_fun = trans_fun;
1543 user_dflow.problem = &user_problem;
1544 df_worklist_dataflow (&user_dflow, blocks, postorder, n_blocks);
1545 }
1546
1547
1548 \f
1549 /*----------------------------------------------------------------------------
1550 Functions to support limited incremental change.
1551 ----------------------------------------------------------------------------*/
1552
1553
1554 /* Get basic block info. */
1555
1556 static void *
1557 df_get_bb_info (struct dataflow *dflow, unsigned int index)
1558 {
1559 if (dflow->block_info == NULL)
1560 return NULL;
1561 if (index >= dflow->block_info_size)
1562 return NULL;
1563 return (void *)((char *)dflow->block_info
1564 + index * dflow->problem->block_info_elt_size);
1565 }
1566
1567
1568 /* Set basic block info. */
1569
1570 static void
1571 df_set_bb_info (struct dataflow *dflow, unsigned int index,
1572 void *bb_info)
1573 {
1574 gcc_assert (dflow->block_info);
1575 memcpy ((char *)dflow->block_info
1576 + index * dflow->problem->block_info_elt_size,
1577 bb_info, dflow->problem->block_info_elt_size);
1578 }
1579
1580
1581 /* Clear basic block info. */
1582
1583 static void
1584 df_clear_bb_info (struct dataflow *dflow, unsigned int index)
1585 {
1586 gcc_assert (dflow->block_info);
1587 gcc_assert (dflow->block_info_size > index);
1588 memset ((char *)dflow->block_info
1589 + index * dflow->problem->block_info_elt_size,
1590 0, dflow->problem->block_info_elt_size);
1591 }
1592
1593
1594 /* Mark the solutions as being out of date. */
1595
1596 void
1597 df_mark_solutions_dirty (void)
1598 {
1599 if (df)
1600 {
1601 int p;
1602 for (p = 1; p < df->num_problems_defined; p++)
1603 df->problems_in_order[p]->solutions_dirty = true;
1604 }
1605 }
1606
1607
1608 /* Return true if BB needs it's transfer functions recomputed. */
1609
1610 bool
1611 df_get_bb_dirty (basic_block bb)
1612 {
1613 return bitmap_bit_p ((df_live
1614 ? df_live : df_lr)->out_of_date_transfer_functions,
1615 bb->index);
1616 }
1617
1618
1619 /* Mark BB as needing it's transfer functions as being out of
1620 date. */
1621
1622 void
1623 df_set_bb_dirty (basic_block bb)
1624 {
1625 bb->flags |= BB_MODIFIED;
1626 if (df)
1627 {
1628 int p;
1629 for (p = 1; p < df->num_problems_defined; p++)
1630 {
1631 struct dataflow *dflow = df->problems_in_order[p];
1632 if (dflow->out_of_date_transfer_functions)
1633 bitmap_set_bit (dflow->out_of_date_transfer_functions, bb->index);
1634 }
1635 df_mark_solutions_dirty ();
1636 }
1637 }
1638
1639
1640 /* Grow the bb_info array. */
1641
1642 void
1643 df_grow_bb_info (struct dataflow *dflow)
1644 {
1645 unsigned int new_size = last_basic_block_for_fn (cfun) + 1;
1646 if (dflow->block_info_size < new_size)
1647 {
1648 new_size += new_size / 4;
1649 dflow->block_info
1650 = (void *)XRESIZEVEC (char, (char *)dflow->block_info,
1651 new_size
1652 * dflow->problem->block_info_elt_size);
1653 memset ((char *)dflow->block_info
1654 + dflow->block_info_size
1655 * dflow->problem->block_info_elt_size,
1656 0,
1657 (new_size - dflow->block_info_size)
1658 * dflow->problem->block_info_elt_size);
1659 dflow->block_info_size = new_size;
1660 }
1661 }
1662
1663
1664 /* Clear the dirty bits. This is called from places that delete
1665 blocks. */
1666 static void
1667 df_clear_bb_dirty (basic_block bb)
1668 {
1669 int p;
1670 for (p = 1; p < df->num_problems_defined; p++)
1671 {
1672 struct dataflow *dflow = df->problems_in_order[p];
1673 if (dflow->out_of_date_transfer_functions)
1674 bitmap_clear_bit (dflow->out_of_date_transfer_functions, bb->index);
1675 }
1676 }
1677
1678 /* Called from the rtl_compact_blocks to reorganize the problems basic
1679 block info. */
1680
1681 void
1682 df_compact_blocks (void)
1683 {
1684 int i, p;
1685 basic_block bb;
1686 void *problem_temps;
1687 bitmap_head tmp;
1688
1689 bitmap_initialize (&tmp, &df_bitmap_obstack);
1690 for (p = 0; p < df->num_problems_defined; p++)
1691 {
1692 struct dataflow *dflow = df->problems_in_order[p];
1693
1694 /* Need to reorganize the out_of_date_transfer_functions for the
1695 dflow problem. */
1696 if (dflow->out_of_date_transfer_functions)
1697 {
1698 bitmap_copy (&tmp, dflow->out_of_date_transfer_functions);
1699 bitmap_clear (dflow->out_of_date_transfer_functions);
1700 if (bitmap_bit_p (&tmp, ENTRY_BLOCK))
1701 bitmap_set_bit (dflow->out_of_date_transfer_functions, ENTRY_BLOCK);
1702 if (bitmap_bit_p (&tmp, EXIT_BLOCK))
1703 bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
1704
1705 i = NUM_FIXED_BLOCKS;
1706 FOR_EACH_BB_FN (bb, cfun)
1707 {
1708 if (bitmap_bit_p (&tmp, bb->index))
1709 bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
1710 i++;
1711 }
1712 }
1713
1714 /* Now shuffle the block info for the problem. */
1715 if (dflow->problem->free_bb_fun)
1716 {
1717 int size = (last_basic_block_for_fn (cfun)
1718 * dflow->problem->block_info_elt_size);
1719 problem_temps = XNEWVAR (char, size);
1720 df_grow_bb_info (dflow);
1721 memcpy (problem_temps, dflow->block_info, size);
1722
1723 /* Copy the bb info from the problem tmps to the proper
1724 place in the block_info vector. Null out the copied
1725 item. The entry and exit blocks never move. */
1726 i = NUM_FIXED_BLOCKS;
1727 FOR_EACH_BB_FN (bb, cfun)
1728 {
1729 df_set_bb_info (dflow, i,
1730 (char *)problem_temps
1731 + bb->index * dflow->problem->block_info_elt_size);
1732 i++;
1733 }
1734 memset ((char *)dflow->block_info
1735 + i * dflow->problem->block_info_elt_size, 0,
1736 (last_basic_block_for_fn (cfun) - i)
1737 * dflow->problem->block_info_elt_size);
1738 free (problem_temps);
1739 }
1740 }
1741
1742 /* Shuffle the bits in the basic_block indexed arrays. */
1743
1744 if (df->blocks_to_analyze)
1745 {
1746 if (bitmap_bit_p (&tmp, ENTRY_BLOCK))
1747 bitmap_set_bit (df->blocks_to_analyze, ENTRY_BLOCK);
1748 if (bitmap_bit_p (&tmp, EXIT_BLOCK))
1749 bitmap_set_bit (df->blocks_to_analyze, EXIT_BLOCK);
1750 bitmap_copy (&tmp, df->blocks_to_analyze);
1751 bitmap_clear (df->blocks_to_analyze);
1752 i = NUM_FIXED_BLOCKS;
1753 FOR_EACH_BB_FN (bb, cfun)
1754 {
1755 if (bitmap_bit_p (&tmp, bb->index))
1756 bitmap_set_bit (df->blocks_to_analyze, i);
1757 i++;
1758 }
1759 }
1760
1761 bitmap_clear (&tmp);
1762
1763 i = NUM_FIXED_BLOCKS;
1764 FOR_EACH_BB_FN (bb, cfun)
1765 {
1766 SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
1767 bb->index = i;
1768 i++;
1769 }
1770
1771 gcc_assert (i == n_basic_blocks_for_fn (cfun));
1772
1773 for (; i < last_basic_block_for_fn (cfun); i++)
1774 SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
1775
1776 #ifdef DF_DEBUG_CFG
1777 if (!df_lr->solutions_dirty)
1778 df_set_clean_cfg ();
1779 #endif
1780 }
1781
1782
1783 /* Shove NEW_BLOCK in at OLD_INDEX. Called from ifcvt to hack a
1784 block. There is no excuse for people to do this kind of thing. */
1785
1786 void
1787 df_bb_replace (int old_index, basic_block new_block)
1788 {
1789 int new_block_index = new_block->index;
1790 int p;
1791
1792 if (dump_file)
1793 fprintf (dump_file, "shoving block %d into %d\n", new_block_index, old_index);
1794
1795 gcc_assert (df);
1796 gcc_assert (BASIC_BLOCK_FOR_FN (cfun, old_index) == NULL);
1797
1798 for (p = 0; p < df->num_problems_defined; p++)
1799 {
1800 struct dataflow *dflow = df->problems_in_order[p];
1801 if (dflow->block_info)
1802 {
1803 df_grow_bb_info (dflow);
1804 df_set_bb_info (dflow, old_index,
1805 df_get_bb_info (dflow, new_block_index));
1806 }
1807 }
1808
1809 df_clear_bb_dirty (new_block);
1810 SET_BASIC_BLOCK_FOR_FN (cfun, old_index, new_block);
1811 new_block->index = old_index;
1812 df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, old_index));
1813 SET_BASIC_BLOCK_FOR_FN (cfun, new_block_index, NULL);
1814 }
1815
1816
1817 /* Free all of the per basic block dataflow from all of the problems.
1818 This is typically called before a basic block is deleted and the
1819 problem will be reanalyzed. */
1820
1821 void
1822 df_bb_delete (int bb_index)
1823 {
1824 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
1825 int i;
1826
1827 if (!df)
1828 return;
1829
1830 for (i = 0; i < df->num_problems_defined; i++)
1831 {
1832 struct dataflow *dflow = df->problems_in_order[i];
1833 if (dflow->problem->free_bb_fun)
1834 {
1835 void *bb_info = df_get_bb_info (dflow, bb_index);
1836 if (bb_info)
1837 {
1838 dflow->problem->free_bb_fun (bb, bb_info);
1839 df_clear_bb_info (dflow, bb_index);
1840 }
1841 }
1842 }
1843 df_clear_bb_dirty (bb);
1844 df_mark_solutions_dirty ();
1845 }
1846
1847
1848 /* Verify that there is a place for everything and everything is in
1849 its place. This is too expensive to run after every pass in the
1850 mainline. However this is an excellent debugging tool if the
1851 dataflow information is not being updated properly. You can just
1852 sprinkle calls in until you find the place that is changing an
1853 underlying structure without calling the proper updating
1854 routine. */
1855
1856 void
1857 df_verify (void)
1858 {
1859 df_scan_verify ();
1860 #ifdef ENABLE_DF_CHECKING
1861 df_lr_verify_transfer_functions ();
1862 if (df_live)
1863 df_live_verify_transfer_functions ();
1864 #endif
1865 }
1866
1867 #ifdef DF_DEBUG_CFG
1868
1869 /* Compute an array of ints that describes the cfg. This can be used
1870 to discover places where the cfg is modified by the appropriate
1871 calls have not been made to the keep df informed. The internals of
1872 this are unexciting, the key is that two instances of this can be
1873 compared to see if any changes have been made to the cfg. */
1874
1875 static int *
1876 df_compute_cfg_image (void)
1877 {
1878 basic_block bb;
1879 int size = 2 + (2 * n_basic_blocks_for_fn (cfun));
1880 int i;
1881 int * map;
1882
1883 FOR_ALL_BB_FN (bb, cfun)
1884 {
1885 size += EDGE_COUNT (bb->succs);
1886 }
1887
1888 map = XNEWVEC (int, size);
1889 map[0] = size;
1890 i = 1;
1891 FOR_ALL_BB_FN (bb, cfun)
1892 {
1893 edge_iterator ei;
1894 edge e;
1895
1896 map[i++] = bb->index;
1897 FOR_EACH_EDGE (e, ei, bb->succs)
1898 map[i++] = e->dest->index;
1899 map[i++] = -1;
1900 }
1901 map[i] = -1;
1902 return map;
1903 }
1904
1905 static int *saved_cfg = NULL;
1906
1907
1908 /* This function compares the saved version of the cfg with the
1909 current cfg and aborts if the two are identical. The function
1910 silently returns if the cfg has been marked as dirty or the two are
1911 the same. */
1912
1913 void
1914 df_check_cfg_clean (void)
1915 {
1916 int *new_map;
1917
1918 if (!df)
1919 return;
1920
1921 if (df_lr->solutions_dirty)
1922 return;
1923
1924 if (saved_cfg == NULL)
1925 return;
1926
1927 new_map = df_compute_cfg_image ();
1928 gcc_assert (memcmp (saved_cfg, new_map, saved_cfg[0] * sizeof (int)) == 0);
1929 free (new_map);
1930 }
1931
1932
1933 /* This function builds a cfg fingerprint and squirrels it away in
1934 saved_cfg. */
1935
1936 static void
1937 df_set_clean_cfg (void)
1938 {
1939 free (saved_cfg);
1940 saved_cfg = df_compute_cfg_image ();
1941 }
1942
1943 #endif /* DF_DEBUG_CFG */
1944 /*----------------------------------------------------------------------------
1945 PUBLIC INTERFACES TO QUERY INFORMATION.
1946 ----------------------------------------------------------------------------*/
1947
1948
1949 /* Return first def of REGNO within BB. */
1950
1951 df_ref
1952 df_bb_regno_first_def_find (basic_block bb, unsigned int regno)
1953 {
1954 rtx insn;
1955 df_ref *def_rec;
1956 unsigned int uid;
1957
1958 FOR_BB_INSNS (bb, insn)
1959 {
1960 if (!INSN_P (insn))
1961 continue;
1962
1963 uid = INSN_UID (insn);
1964 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1965 {
1966 df_ref def = *def_rec;
1967 if (DF_REF_REGNO (def) == regno)
1968 return def;
1969 }
1970 }
1971 return NULL;
1972 }
1973
1974
1975 /* Return last def of REGNO within BB. */
1976
1977 df_ref
1978 df_bb_regno_last_def_find (basic_block bb, unsigned int regno)
1979 {
1980 rtx insn;
1981 df_ref *def_rec;
1982 unsigned int uid;
1983
1984 FOR_BB_INSNS_REVERSE (bb, insn)
1985 {
1986 if (!INSN_P (insn))
1987 continue;
1988
1989 uid = INSN_UID (insn);
1990 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1991 {
1992 df_ref def = *def_rec;
1993 if (DF_REF_REGNO (def) == regno)
1994 return def;
1995 }
1996 }
1997
1998 return NULL;
1999 }
2000
2001 /* Finds the reference corresponding to the definition of REG in INSN.
2002 DF is the dataflow object. */
2003
2004 df_ref
2005 df_find_def (rtx insn, rtx reg)
2006 {
2007 unsigned int uid;
2008 df_ref *def_rec;
2009
2010 if (GET_CODE (reg) == SUBREG)
2011 reg = SUBREG_REG (reg);
2012 gcc_assert (REG_P (reg));
2013
2014 uid = INSN_UID (insn);
2015 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
2016 {
2017 df_ref def = *def_rec;
2018 if (DF_REF_REGNO (def) == REGNO (reg))
2019 return def;
2020 }
2021
2022 return NULL;
2023 }
2024
2025
2026 /* Return true if REG is defined in INSN, zero otherwise. */
2027
2028 bool
2029 df_reg_defined (rtx insn, rtx reg)
2030 {
2031 return df_find_def (insn, reg) != NULL;
2032 }
2033
2034
2035 /* Finds the reference corresponding to the use of REG in INSN.
2036 DF is the dataflow object. */
2037
2038 df_ref
2039 df_find_use (rtx insn, rtx reg)
2040 {
2041 unsigned int uid;
2042 df_ref *use_rec;
2043
2044 if (GET_CODE (reg) == SUBREG)
2045 reg = SUBREG_REG (reg);
2046 gcc_assert (REG_P (reg));
2047
2048 uid = INSN_UID (insn);
2049 for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
2050 {
2051 df_ref use = *use_rec;
2052 if (DF_REF_REGNO (use) == REGNO (reg))
2053 return use;
2054 }
2055 if (df->changeable_flags & DF_EQ_NOTES)
2056 for (use_rec = DF_INSN_UID_EQ_USES (uid); *use_rec; use_rec++)
2057 {
2058 df_ref use = *use_rec;
2059 if (DF_REF_REGNO (use) == REGNO (reg))
2060 return use;
2061 }
2062 return NULL;
2063 }
2064
2065
2066 /* Return true if REG is referenced in INSN, zero otherwise. */
2067
2068 bool
2069 df_reg_used (rtx insn, rtx reg)
2070 {
2071 return df_find_use (insn, reg) != NULL;
2072 }
2073
2074 \f
2075 /*----------------------------------------------------------------------------
2076 Debugging and printing functions.
2077 ----------------------------------------------------------------------------*/
2078
2079 /* Write information about registers and basic blocks into FILE.
2080 This is part of making a debugging dump. */
2081
2082 void
2083 dump_regset (regset r, FILE *outf)
2084 {
2085 unsigned i;
2086 reg_set_iterator rsi;
2087
2088 if (r == NULL)
2089 {
2090 fputs (" (nil)", outf);
2091 return;
2092 }
2093
2094 EXECUTE_IF_SET_IN_REG_SET (r, 0, i, rsi)
2095 {
2096 fprintf (outf, " %d", i);
2097 if (i < FIRST_PSEUDO_REGISTER)
2098 fprintf (outf, " [%s]",
2099 reg_names[i]);
2100 }
2101 }
2102
2103 /* Print a human-readable representation of R on the standard error
2104 stream. This function is designed to be used from within the
2105 debugger. */
2106 extern void debug_regset (regset);
2107 DEBUG_FUNCTION void
2108 debug_regset (regset r)
2109 {
2110 dump_regset (r, stderr);
2111 putc ('\n', stderr);
2112 }
2113
2114 /* Write information about registers and basic blocks into FILE.
2115 This is part of making a debugging dump. */
2116
2117 void
2118 df_print_regset (FILE *file, bitmap r)
2119 {
2120 unsigned int i;
2121 bitmap_iterator bi;
2122
2123 if (r == NULL)
2124 fputs (" (nil)", file);
2125 else
2126 {
2127 EXECUTE_IF_SET_IN_BITMAP (r, 0, i, bi)
2128 {
2129 fprintf (file, " %d", i);
2130 if (i < FIRST_PSEUDO_REGISTER)
2131 fprintf (file, " [%s]", reg_names[i]);
2132 }
2133 }
2134 fprintf (file, "\n");
2135 }
2136
2137
2138 /* Write information about registers and basic blocks into FILE. The
2139 bitmap is in the form used by df_byte_lr. This is part of making a
2140 debugging dump. */
2141
2142 void
2143 df_print_word_regset (FILE *file, bitmap r)
2144 {
2145 unsigned int max_reg = max_reg_num ();
2146
2147 if (r == NULL)
2148 fputs (" (nil)", file);
2149 else
2150 {
2151 unsigned int i;
2152 for (i = FIRST_PSEUDO_REGISTER; i < max_reg; i++)
2153 {
2154 bool found = (bitmap_bit_p (r, 2 * i)
2155 || bitmap_bit_p (r, 2 * i + 1));
2156 if (found)
2157 {
2158 int word;
2159 const char * sep = "";
2160 fprintf (file, " %d", i);
2161 fprintf (file, "(");
2162 for (word = 0; word < 2; word++)
2163 if (bitmap_bit_p (r, 2 * i + word))
2164 {
2165 fprintf (file, "%s%d", sep, word);
2166 sep = ", ";
2167 }
2168 fprintf (file, ")");
2169 }
2170 }
2171 }
2172 fprintf (file, "\n");
2173 }
2174
2175
2176 /* Dump dataflow info. */
2177
2178 void
2179 df_dump (FILE *file)
2180 {
2181 basic_block bb;
2182 df_dump_start (file);
2183
2184 FOR_ALL_BB_FN (bb, cfun)
2185 {
2186 df_print_bb_index (bb, file);
2187 df_dump_top (bb, file);
2188 df_dump_bottom (bb, file);
2189 }
2190
2191 fprintf (file, "\n");
2192 }
2193
2194
2195 /* Dump dataflow info for df->blocks_to_analyze. */
2196
2197 void
2198 df_dump_region (FILE *file)
2199 {
2200 if (df->blocks_to_analyze)
2201 {
2202 bitmap_iterator bi;
2203 unsigned int bb_index;
2204
2205 fprintf (file, "\n\nstarting region dump\n");
2206 df_dump_start (file);
2207
2208 EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
2209 {
2210 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
2211 dump_bb (file, bb, 0, TDF_DETAILS);
2212 }
2213 fprintf (file, "\n");
2214 }
2215 else
2216 df_dump (file);
2217 }
2218
2219
2220 /* Dump the introductory information for each problem defined. */
2221
2222 void
2223 df_dump_start (FILE *file)
2224 {
2225 int i;
2226
2227 if (!df || !file)
2228 return;
2229
2230 fprintf (file, "\n\n%s\n", current_function_name ());
2231 fprintf (file, "\nDataflow summary:\n");
2232 if (df->blocks_to_analyze)
2233 fprintf (file, "def_info->table_size = %d, use_info->table_size = %d\n",
2234 DF_DEFS_TABLE_SIZE (), DF_USES_TABLE_SIZE ());
2235
2236 for (i = 0; i < df->num_problems_defined; i++)
2237 {
2238 struct dataflow *dflow = df->problems_in_order[i];
2239 if (dflow->computed)
2240 {
2241 df_dump_problem_function fun = dflow->problem->dump_start_fun;
2242 if (fun)
2243 fun (file);
2244 }
2245 }
2246 }
2247
2248
2249 /* Dump the top or bottom of the block information for BB. */
2250 static void
2251 df_dump_bb_problem_data (basic_block bb, FILE *file, bool top)
2252 {
2253 int i;
2254
2255 if (!df || !file)
2256 return;
2257
2258 for (i = 0; i < df->num_problems_defined; i++)
2259 {
2260 struct dataflow *dflow = df->problems_in_order[i];
2261 if (dflow->computed)
2262 {
2263 df_dump_bb_problem_function bbfun;
2264
2265 if (top)
2266 bbfun = dflow->problem->dump_top_fun;
2267 else
2268 bbfun = dflow->problem->dump_bottom_fun;
2269
2270 if (bbfun)
2271 bbfun (bb, file);
2272 }
2273 }
2274 }
2275
2276 /* Dump the top of the block information for BB. */
2277
2278 void
2279 df_dump_top (basic_block bb, FILE *file)
2280 {
2281 df_dump_bb_problem_data (bb, file, /*top=*/true);
2282 }
2283
2284 /* Dump the bottom of the block information for BB. */
2285
2286 void
2287 df_dump_bottom (basic_block bb, FILE *file)
2288 {
2289 df_dump_bb_problem_data (bb, file, /*top=*/false);
2290 }
2291
2292
2293 /* Dump information about INSN just before or after dumping INSN itself. */
2294 static void
2295 df_dump_insn_problem_data (const_rtx insn, FILE *file, bool top)
2296 {
2297 int i;
2298
2299 if (!df || !file)
2300 return;
2301
2302 for (i = 0; i < df->num_problems_defined; i++)
2303 {
2304 struct dataflow *dflow = df->problems_in_order[i];
2305 if (dflow->computed)
2306 {
2307 df_dump_insn_problem_function insnfun;
2308
2309 if (top)
2310 insnfun = dflow->problem->dump_insn_top_fun;
2311 else
2312 insnfun = dflow->problem->dump_insn_bottom_fun;
2313
2314 if (insnfun)
2315 insnfun (insn, file);
2316 }
2317 }
2318 }
2319
2320 /* Dump information about INSN before dumping INSN itself. */
2321
2322 void
2323 df_dump_insn_top (const_rtx insn, FILE *file)
2324 {
2325 df_dump_insn_problem_data (insn, file, /*top=*/true);
2326 }
2327
2328 /* Dump information about INSN after dumping INSN itself. */
2329
2330 void
2331 df_dump_insn_bottom (const_rtx insn, FILE *file)
2332 {
2333 df_dump_insn_problem_data (insn, file, /*top=*/false);
2334 }
2335
2336
2337 static void
2338 df_ref_dump (df_ref ref, FILE *file)
2339 {
2340 fprintf (file, "%c%d(%d)",
2341 DF_REF_REG_DEF_P (ref)
2342 ? 'd'
2343 : (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE) ? 'e' : 'u',
2344 DF_REF_ID (ref),
2345 DF_REF_REGNO (ref));
2346 }
2347
2348 void
2349 df_refs_chain_dump (df_ref *ref_rec, bool follow_chain, FILE *file)
2350 {
2351 fprintf (file, "{ ");
2352 while (*ref_rec)
2353 {
2354 df_ref ref = *ref_rec;
2355 df_ref_dump (ref, file);
2356 if (follow_chain)
2357 df_chain_dump (DF_REF_CHAIN (ref), file);
2358 ref_rec++;
2359 }
2360 fprintf (file, "}");
2361 }
2362
2363
2364 /* Dump either a ref-def or reg-use chain. */
2365
2366 void
2367 df_regs_chain_dump (df_ref ref, FILE *file)
2368 {
2369 fprintf (file, "{ ");
2370 while (ref)
2371 {
2372 df_ref_dump (ref, file);
2373 ref = DF_REF_NEXT_REG (ref);
2374 }
2375 fprintf (file, "}");
2376 }
2377
2378
2379 static void
2380 df_mws_dump (struct df_mw_hardreg **mws, FILE *file)
2381 {
2382 while (*mws)
2383 {
2384 fprintf (file, "mw %c r[%d..%d]\n",
2385 (DF_MWS_REG_DEF_P (*mws)) ? 'd' : 'u',
2386 (*mws)->start_regno, (*mws)->end_regno);
2387 mws++;
2388 }
2389 }
2390
2391
2392 static void
2393 df_insn_uid_debug (unsigned int uid,
2394 bool follow_chain, FILE *file)
2395 {
2396 fprintf (file, "insn %d luid %d",
2397 uid, DF_INSN_UID_LUID (uid));
2398
2399 if (DF_INSN_UID_DEFS (uid))
2400 {
2401 fprintf (file, " defs ");
2402 df_refs_chain_dump (DF_INSN_UID_DEFS (uid), follow_chain, file);
2403 }
2404
2405 if (DF_INSN_UID_USES (uid))
2406 {
2407 fprintf (file, " uses ");
2408 df_refs_chain_dump (DF_INSN_UID_USES (uid), follow_chain, file);
2409 }
2410
2411 if (DF_INSN_UID_EQ_USES (uid))
2412 {
2413 fprintf (file, " eq uses ");
2414 df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid), follow_chain, file);
2415 }
2416
2417 if (DF_INSN_UID_MWS (uid))
2418 {
2419 fprintf (file, " mws ");
2420 df_mws_dump (DF_INSN_UID_MWS (uid), file);
2421 }
2422 fprintf (file, "\n");
2423 }
2424
2425
2426 DEBUG_FUNCTION void
2427 df_insn_debug (rtx insn, bool follow_chain, FILE *file)
2428 {
2429 df_insn_uid_debug (INSN_UID (insn), follow_chain, file);
2430 }
2431
2432 DEBUG_FUNCTION void
2433 df_insn_debug_regno (rtx insn, FILE *file)
2434 {
2435 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2436
2437 fprintf (file, "insn %d bb %d luid %d defs ",
2438 INSN_UID (insn), BLOCK_FOR_INSN (insn)->index,
2439 DF_INSN_INFO_LUID (insn_info));
2440 df_refs_chain_dump (DF_INSN_INFO_DEFS (insn_info), false, file);
2441
2442 fprintf (file, " uses ");
2443 df_refs_chain_dump (DF_INSN_INFO_USES (insn_info), false, file);
2444
2445 fprintf (file, " eq_uses ");
2446 df_refs_chain_dump (DF_INSN_INFO_EQ_USES (insn_info), false, file);
2447 fprintf (file, "\n");
2448 }
2449
2450 DEBUG_FUNCTION void
2451 df_regno_debug (unsigned int regno, FILE *file)
2452 {
2453 fprintf (file, "reg %d defs ", regno);
2454 df_regs_chain_dump (DF_REG_DEF_CHAIN (regno), file);
2455 fprintf (file, " uses ");
2456 df_regs_chain_dump (DF_REG_USE_CHAIN (regno), file);
2457 fprintf (file, " eq_uses ");
2458 df_regs_chain_dump (DF_REG_EQ_USE_CHAIN (regno), file);
2459 fprintf (file, "\n");
2460 }
2461
2462
2463 DEBUG_FUNCTION void
2464 df_ref_debug (df_ref ref, FILE *file)
2465 {
2466 fprintf (file, "%c%d ",
2467 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
2468 DF_REF_ID (ref));
2469 fprintf (file, "reg %d bb %d insn %d flag %#x type %#x ",
2470 DF_REF_REGNO (ref),
2471 DF_REF_BBNO (ref),
2472 DF_REF_IS_ARTIFICIAL (ref) ? -1 : DF_REF_INSN_UID (ref),
2473 DF_REF_FLAGS (ref),
2474 DF_REF_TYPE (ref));
2475 if (DF_REF_LOC (ref))
2476 {
2477 if (flag_dump_noaddr)
2478 fprintf (file, "loc #(#) chain ");
2479 else
2480 fprintf (file, "loc %p(%p) chain ", (void *)DF_REF_LOC (ref),
2481 (void *)*DF_REF_LOC (ref));
2482 }
2483 else
2484 fprintf (file, "chain ");
2485 df_chain_dump (DF_REF_CHAIN (ref), file);
2486 fprintf (file, "\n");
2487 }
2488 \f
2489 /* Functions for debugging from GDB. */
2490
2491 DEBUG_FUNCTION void
2492 debug_df_insn (rtx insn)
2493 {
2494 df_insn_debug (insn, true, stderr);
2495 debug_rtx (insn);
2496 }
2497
2498
2499 DEBUG_FUNCTION void
2500 debug_df_reg (rtx reg)
2501 {
2502 df_regno_debug (REGNO (reg), stderr);
2503 }
2504
2505
2506 DEBUG_FUNCTION void
2507 debug_df_regno (unsigned int regno)
2508 {
2509 df_regno_debug (regno, stderr);
2510 }
2511
2512
2513 DEBUG_FUNCTION void
2514 debug_df_ref (df_ref ref)
2515 {
2516 df_ref_debug (ref, stderr);
2517 }
2518
2519
2520 DEBUG_FUNCTION void
2521 debug_df_defno (unsigned int defno)
2522 {
2523 df_ref_debug (DF_DEFS_GET (defno), stderr);
2524 }
2525
2526
2527 DEBUG_FUNCTION void
2528 debug_df_useno (unsigned int defno)
2529 {
2530 df_ref_debug (DF_USES_GET (defno), stderr);
2531 }
2532
2533
2534 DEBUG_FUNCTION void
2535 debug_df_chain (struct df_link *link)
2536 {
2537 df_chain_dump (link, stderr);
2538 fputc ('\n', stderr);
2539 }