]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/df-core.c
pass current function to opt_pass::gate ()
[thirdparty/gcc.git] / gcc / df-core.c
1 /* Allocation for dataflow support routines.
2 Copyright (C) 1999-2014 Free Software Foundation, Inc.
3 Originally contributed by Michael P. Hayes
4 (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
5 Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
6 and Kenneth Zadeck (zadeck@naturalbridge.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /*
25 OVERVIEW:
26
27 The files in this collection (df*.c,df.h) provide a general framework
28 for solving dataflow problems. The global dataflow is performed using
29 a good implementation of iterative dataflow analysis.
30
31 The file df-problems.c provides problem instance for the most common
32 dataflow problems: reaching defs, upward exposed uses, live variables,
33 uninitialized variables, def-use chains, and use-def chains. However,
34 the interface allows other dataflow problems to be defined as well.
35
36 Dataflow analysis is available in most of the rtl backend (the parts
37 between pass_df_initialize and pass_df_finish). It is quite likely
38 that these boundaries will be expanded in the future. The only
39 requirement is that there be a correct control flow graph.
40
41 There are three variations of the live variable problem that are
42 available whenever dataflow is available. The LR problem finds the
43 areas that can reach a use of a variable, the UR problems finds the
44 areas that can be reached from a definition of a variable. The LIVE
45 problem finds the intersection of these two areas.
46
47 There are several optional problems. These can be enabled when they
48 are needed and disabled when they are not needed.
49
50 Dataflow problems are generally solved in three layers. The bottom
51 layer is called scanning where a data structure is built for each rtl
52 insn that describes the set of defs and uses of that insn. Scanning
53 is generally kept up to date, i.e. as the insns changes, the scanned
54 version of that insn changes also. There are various mechanisms for
55 making this happen and are described in the INCREMENTAL SCANNING
56 section.
57
58 In the middle layer, basic blocks are scanned to produce transfer
59 functions which describe the effects of that block on the global
60 dataflow solution. The transfer functions are only rebuilt if the
61 some instruction within the block has changed.
62
63 The top layer is the dataflow solution itself. The dataflow solution
64 is computed by using an efficient iterative solver and the transfer
65 functions. The dataflow solution must be recomputed whenever the
66 control changes or if one of the transfer function changes.
67
68
69 USAGE:
70
71 Here is an example of using the dataflow routines.
72
73 df_[chain,live,note,rd]_add_problem (flags);
74
75 df_set_blocks (blocks);
76
77 df_analyze ();
78
79 df_dump (stderr);
80
81 df_finish_pass (false);
82
83 DF_[chain,live,note,rd]_ADD_PROBLEM adds a problem, defined by an
84 instance to struct df_problem, to the set of problems solved in this
85 instance of df. All calls to add a problem for a given instance of df
86 must occur before the first call to DF_ANALYZE.
87
88 Problems can be dependent on other problems. For instance, solving
89 def-use or use-def chains is dependent on solving reaching
90 definitions. As long as these dependencies are listed in the problem
91 definition, the order of adding the problems is not material.
92 Otherwise, the problems will be solved in the order of calls to
93 df_add_problem. Note that it is not necessary to have a problem. In
94 that case, df will just be used to do the scanning.
95
96
97
98 DF_SET_BLOCKS is an optional call used to define a region of the
99 function on which the analysis will be performed. The normal case is
100 to analyze the entire function and no call to df_set_blocks is made.
101 DF_SET_BLOCKS only effects the blocks that are effected when computing
102 the transfer functions and final solution. The insn level information
103 is always kept up to date.
104
105 When a subset is given, the analysis behaves as if the function only
106 contains those blocks and any edges that occur directly between the
107 blocks in the set. Care should be taken to call df_set_blocks right
108 before the call to analyze in order to eliminate the possibility that
109 optimizations that reorder blocks invalidate the bitvector.
110
111 DF_ANALYZE causes all of the defined problems to be (re)solved. When
112 DF_ANALYZE is completes, the IN and OUT sets for each basic block
113 contain the computer information. The DF_*_BB_INFO macros can be used
114 to access these bitvectors. All deferred rescannings are down before
115 the transfer functions are recomputed.
116
117 DF_DUMP can then be called to dump the information produce to some
118 file. This calls DF_DUMP_START, to print the information that is not
119 basic block specific, and then calls DF_DUMP_TOP and DF_DUMP_BOTTOM
120 for each block to print the basic specific information. These parts
121 can all be called separately as part of a larger dump function.
122
123
124 DF_FINISH_PASS causes df_remove_problem to be called on all of the
125 optional problems. It also causes any insns whose scanning has been
126 deferred to be rescanned as well as clears all of the changeable flags.
127 Setting the pass manager TODO_df_finish flag causes this function to
128 be run. However, the pass manager will call df_finish_pass AFTER the
129 pass dumping has been done, so if you want to see the results of the
130 optional problems in the pass dumps, use the TODO flag rather than
131 calling the function yourself.
132
133 INCREMENTAL SCANNING
134
135 There are four ways of doing the incremental scanning:
136
137 1) Immediate rescanning - Calls to df_insn_rescan, df_notes_rescan,
138 df_bb_delete, df_insn_change_bb have been added to most of
139 the low level service functions that maintain the cfg and change
140 rtl. Calling and of these routines many cause some number of insns
141 to be rescanned.
142
143 For most modern rtl passes, this is certainly the easiest way to
144 manage rescanning the insns. This technique also has the advantage
145 that the scanning information is always correct and can be relied
146 upon even after changes have been made to the instructions. This
147 technique is contra indicated in several cases:
148
149 a) If def-use chains OR use-def chains (but not both) are built,
150 using this is SIMPLY WRONG. The problem is that when a ref is
151 deleted that is the target of an edge, there is not enough
152 information to efficiently find the source of the edge and
153 delete the edge. This leaves a dangling reference that may
154 cause problems.
155
156 b) If def-use chains AND use-def chains are built, this may
157 produce unexpected results. The problem is that the incremental
158 scanning of an insn does not know how to repair the chains that
159 point into an insn when the insn changes. So the incremental
160 scanning just deletes the chains that enter and exit the insn
161 being changed. The dangling reference issue in (a) is not a
162 problem here, but if the pass is depending on the chains being
163 maintained after insns have been modified, this technique will
164 not do the correct thing.
165
166 c) If the pass modifies insns several times, this incremental
167 updating may be expensive.
168
169 d) If the pass modifies all of the insns, as does register
170 allocation, it is simply better to rescan the entire function.
171
172 2) Deferred rescanning - Calls to df_insn_rescan, df_notes_rescan, and
173 df_insn_delete do not immediately change the insn but instead make
174 a note that the insn needs to be rescanned. The next call to
175 df_analyze, df_finish_pass, or df_process_deferred_rescans will
176 cause all of the pending rescans to be processed.
177
178 This is the technique of choice if either 1a, 1b, or 1c are issues
179 in the pass. In the case of 1a or 1b, a call to df_finish_pass
180 (either manually or via TODO_df_finish) should be made before the
181 next call to df_analyze or df_process_deferred_rescans.
182
183 This mode is also used by a few passes that still rely on note_uses,
184 note_stores and for_each_rtx instead of using the DF data. This
185 can be said to fall under case 1c.
186
187 To enable this mode, call df_set_flags (DF_DEFER_INSN_RESCAN).
188 (This mode can be cleared by calling df_clear_flags
189 (DF_DEFER_INSN_RESCAN) but this does not cause the deferred insns to
190 be rescanned.
191
192 3) Total rescanning - In this mode the rescanning is disabled.
193 Only when insns are deleted is the df information associated with
194 it also deleted. At the end of the pass, a call must be made to
195 df_insn_rescan_all. This method is used by the register allocator
196 since it generally changes each insn multiple times (once for each ref)
197 and does not need to make use of the updated scanning information.
198
199 4) Do it yourself - In this mechanism, the pass updates the insns
200 itself using the low level df primitives. Currently no pass does
201 this, but it has the advantage that it is quite efficient given
202 that the pass generally has exact knowledge of what it is changing.
203
204 DATA STRUCTURES
205
206 Scanning produces a `struct df_ref' data structure (ref) is allocated
207 for every register reference (def or use) and this records the insn
208 and bb the ref is found within. The refs are linked together in
209 chains of uses and defs for each insn and for each register. Each ref
210 also has a chain field that links all the use refs for a def or all
211 the def refs for a use. This is used to create use-def or def-use
212 chains.
213
214 Different optimizations have different needs. Ultimately, only
215 register allocation and schedulers should be using the bitmaps
216 produced for the live register and uninitialized register problems.
217 The rest of the backend should be upgraded to using and maintaining
218 the linked information such as def use or use def chains.
219
220
221 PHILOSOPHY:
222
223 While incremental bitmaps are not worthwhile to maintain, incremental
224 chains may be perfectly reasonable. The fastest way to build chains
225 from scratch or after significant modifications is to build reaching
226 definitions (RD) and build the chains from this.
227
228 However, general algorithms for maintaining use-def or def-use chains
229 are not practical. The amount of work to recompute the chain any
230 chain after an arbitrary change is large. However, with a modest
231 amount of work it is generally possible to have the application that
232 uses the chains keep them up to date. The high level knowledge of
233 what is really happening is essential to crafting efficient
234 incremental algorithms.
235
236 As for the bit vector problems, there is no interface to give a set of
237 blocks over with to resolve the iteration. In general, restarting a
238 dataflow iteration is difficult and expensive. Again, the best way to
239 keep the dataflow information up to data (if this is really what is
240 needed) it to formulate a problem specific solution.
241
242 There are fine grained calls for creating and deleting references from
243 instructions in df-scan.c. However, these are not currently connected
244 to the engine that resolves the dataflow equations.
245
246
247 DATA STRUCTURES:
248
249 The basic object is a DF_REF (reference) and this may either be a
250 DEF (definition) or a USE of a register.
251
252 These are linked into a variety of lists; namely reg-def, reg-use,
253 insn-def, insn-use, def-use, and use-def lists. For example, the
254 reg-def lists contain all the locations that define a given register
255 while the insn-use lists contain all the locations that use a
256 register.
257
258 Note that the reg-def and reg-use chains are generally short for
259 pseudos and long for the hard registers.
260
261 ACCESSING INSNS:
262
263 1) The df insn information is kept in an array of DF_INSN_INFO objects.
264 The array is indexed by insn uid, and every DF_REF points to the
265 DF_INSN_INFO object of the insn that contains the reference.
266
267 2) Each insn has three sets of refs, which are linked into one of three
268 lists: The insn's defs list (accessed by the DF_INSN_INFO_DEFS,
269 DF_INSN_DEFS, or DF_INSN_UID_DEFS macros), the insn's uses list
270 (accessed by the DF_INSN_INFO_USES, DF_INSN_USES, or
271 DF_INSN_UID_USES macros) or the insn's eq_uses list (accessed by the
272 DF_INSN_INFO_EQ_USES, DF_INSN_EQ_USES or DF_INSN_UID_EQ_USES macros).
273 The latter list are the list of references in REG_EQUAL or REG_EQUIV
274 notes. These macros produce a ref (or NULL), the rest of the list
275 can be obtained by traversal of the NEXT_REF field (accessed by the
276 DF_REF_NEXT_REF macro.) There is no significance to the ordering of
277 the uses or refs in an instruction.
278
279 3) Each insn has a logical uid field (LUID) which is stored in the
280 DF_INSN_INFO object for the insn. The LUID field is accessed by
281 the DF_INSN_INFO_LUID, DF_INSN_LUID, and DF_INSN_UID_LUID macros.
282 When properly set, the LUID is an integer that numbers each insn in
283 the basic block, in order from the start of the block.
284 The numbers are only correct after a call to df_analyze. They will
285 rot after insns are added deleted or moved round.
286
287 ACCESSING REFS:
288
289 There are 4 ways to obtain access to refs:
290
291 1) References are divided into two categories, REAL and ARTIFICIAL.
292
293 REAL refs are associated with instructions.
294
295 ARTIFICIAL refs are associated with basic blocks. The heads of
296 these lists can be accessed by calling df_get_artificial_defs or
297 df_get_artificial_uses for the particular basic block.
298
299 Artificial defs and uses occur both at the beginning and ends of blocks.
300
301 For blocks that area at the destination of eh edges, the
302 artificial uses and defs occur at the beginning. The defs relate
303 to the registers specified in EH_RETURN_DATA_REGNO and the uses
304 relate to the registers specified in ED_USES. Logically these
305 defs and uses should really occur along the eh edge, but there is
306 no convenient way to do this. Artificial edges that occur at the
307 beginning of the block have the DF_REF_AT_TOP flag set.
308
309 Artificial uses occur at the end of all blocks. These arise from
310 the hard registers that are always live, such as the stack
311 register and are put there to keep the code from forgetting about
312 them.
313
314 Artificial defs occur at the end of the entry block. These arise
315 from registers that are live at entry to the function.
316
317 2) There are three types of refs: defs, uses and eq_uses. (Eq_uses are
318 uses that appear inside a REG_EQUAL or REG_EQUIV note.)
319
320 All of the eq_uses, uses and defs associated with each pseudo or
321 hard register may be linked in a bidirectional chain. These are
322 called reg-use or reg_def chains. If the changeable flag
323 DF_EQ_NOTES is set when the chains are built, the eq_uses will be
324 treated like uses. If it is not set they are ignored.
325
326 The first use, eq_use or def for a register can be obtained using
327 the DF_REG_USE_CHAIN, DF_REG_EQ_USE_CHAIN or DF_REG_DEF_CHAIN
328 macros. Subsequent uses for the same regno can be obtained by
329 following the next_reg field of the ref. The number of elements in
330 each of the chains can be found by using the DF_REG_USE_COUNT,
331 DF_REG_EQ_USE_COUNT or DF_REG_DEF_COUNT macros.
332
333 In previous versions of this code, these chains were ordered. It
334 has not been practical to continue this practice.
335
336 3) If def-use or use-def chains are built, these can be traversed to
337 get to other refs. If the flag DF_EQ_NOTES has been set, the chains
338 include the eq_uses. Otherwise these are ignored when building the
339 chains.
340
341 4) An array of all of the uses (and an array of all of the defs) can
342 be built. These arrays are indexed by the value in the id
343 structure. These arrays are only lazily kept up to date, and that
344 process can be expensive. To have these arrays built, call
345 df_reorganize_defs or df_reorganize_uses. If the flag DF_EQ_NOTES
346 has been set the array will contain the eq_uses. Otherwise these
347 are ignored when building the array and assigning the ids. Note
348 that the values in the id field of a ref may change across calls to
349 df_analyze or df_reorganize_defs or df_reorganize_uses.
350
351 If the only use of this array is to find all of the refs, it is
352 better to traverse all of the registers and then traverse all of
353 reg-use or reg-def chains.
354
355 NOTES:
356
357 Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
358 both a use and a def. These are both marked read/write to show that they
359 are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
360 will generate a use of reg 42 followed by a def of reg 42 (both marked
361 read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
362 generates a use of reg 41 then a def of reg 41 (both marked read/write),
363 even though reg 41 is decremented before it is used for the memory
364 address in this second example.
365
366 A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
367 for which the number of word_mode units covered by the outer mode is
368 smaller than that covered by the inner mode, invokes a read-modify-write
369 operation. We generate both a use and a def and again mark them
370 read/write.
371
372 Paradoxical subreg writes do not leave a trace of the old content, so they
373 are write-only operations.
374 */
375
376
377 #include "config.h"
378 #include "system.h"
379 #include "coretypes.h"
380 #include "tm.h"
381 #include "rtl.h"
382 #include "tm_p.h"
383 #include "insn-config.h"
384 #include "recog.h"
385 #include "function.h"
386 #include "regs.h"
387 #include "alloc-pool.h"
388 #include "flags.h"
389 #include "hard-reg-set.h"
390 #include "basic-block.h"
391 #include "sbitmap.h"
392 #include "bitmap.h"
393 #include "df.h"
394 #include "tree-pass.h"
395 #include "params.h"
396 #include "cfgloop.h"
397
398 static void *df_get_bb_info (struct dataflow *, unsigned int);
399 static void df_set_bb_info (struct dataflow *, unsigned int, void *);
400 static void df_clear_bb_info (struct dataflow *, unsigned int);
401 #ifdef DF_DEBUG_CFG
402 static void df_set_clean_cfg (void);
403 #endif
404
405 /* The obstack on which regsets are allocated. */
406 struct bitmap_obstack reg_obstack;
407
408 /* An obstack for bitmap not related to specific dataflow problems.
409 This obstack should e.g. be used for bitmaps with a short life time
410 such as temporary bitmaps. */
411
412 bitmap_obstack df_bitmap_obstack;
413
414
415 /*----------------------------------------------------------------------------
416 Functions to create, destroy and manipulate an instance of df.
417 ----------------------------------------------------------------------------*/
418
419 struct df_d *df;
420
421 /* Add PROBLEM (and any dependent problems) to the DF instance. */
422
423 void
424 df_add_problem (struct df_problem *problem)
425 {
426 struct dataflow *dflow;
427 int i;
428
429 /* First try to add the dependent problem. */
430 if (problem->dependent_problem)
431 df_add_problem (problem->dependent_problem);
432
433 /* Check to see if this problem has already been defined. If it
434 has, just return that instance, if not, add it to the end of the
435 vector. */
436 dflow = df->problems_by_index[problem->id];
437 if (dflow)
438 return;
439
440 /* Make a new one and add it to the end. */
441 dflow = XCNEW (struct dataflow);
442 dflow->problem = problem;
443 dflow->computed = false;
444 dflow->solutions_dirty = true;
445 df->problems_by_index[dflow->problem->id] = dflow;
446
447 /* Keep the defined problems ordered by index. This solves the
448 problem that RI will use the information from UREC if UREC has
449 been defined, or from LIVE if LIVE is defined and otherwise LR.
450 However for this to work, the computation of RI must be pushed
451 after which ever of those problems is defined, but we do not
452 require any of those except for LR to have actually been
453 defined. */
454 df->num_problems_defined++;
455 for (i = df->num_problems_defined - 2; i >= 0; i--)
456 {
457 if (problem->id < df->problems_in_order[i]->problem->id)
458 df->problems_in_order[i+1] = df->problems_in_order[i];
459 else
460 {
461 df->problems_in_order[i+1] = dflow;
462 return;
463 }
464 }
465 df->problems_in_order[0] = dflow;
466 }
467
468
469 /* Set the MASK flags in the DFLOW problem. The old flags are
470 returned. If a flag is not allowed to be changed this will fail if
471 checking is enabled. */
472 int
473 df_set_flags (int changeable_flags)
474 {
475 int old_flags = df->changeable_flags;
476 df->changeable_flags |= changeable_flags;
477 return old_flags;
478 }
479
480
481 /* Clear the MASK flags in the DFLOW problem. The old flags are
482 returned. If a flag is not allowed to be changed this will fail if
483 checking is enabled. */
484 int
485 df_clear_flags (int changeable_flags)
486 {
487 int old_flags = df->changeable_flags;
488 df->changeable_flags &= ~changeable_flags;
489 return old_flags;
490 }
491
492
493 /* Set the blocks that are to be considered for analysis. If this is
494 not called or is called with null, the entire function in
495 analyzed. */
496
497 void
498 df_set_blocks (bitmap blocks)
499 {
500 if (blocks)
501 {
502 if (dump_file)
503 bitmap_print (dump_file, blocks, "setting blocks to analyze ", "\n");
504 if (df->blocks_to_analyze)
505 {
506 /* This block is called to change the focus from one subset
507 to another. */
508 int p;
509 bitmap_head diff;
510 bitmap_initialize (&diff, &df_bitmap_obstack);
511 bitmap_and_compl (&diff, df->blocks_to_analyze, blocks);
512 for (p = 0; p < df->num_problems_defined; p++)
513 {
514 struct dataflow *dflow = df->problems_in_order[p];
515 if (dflow->optional_p && dflow->problem->reset_fun)
516 dflow->problem->reset_fun (df->blocks_to_analyze);
517 else if (dflow->problem->free_blocks_on_set_blocks)
518 {
519 bitmap_iterator bi;
520 unsigned int bb_index;
521
522 EXECUTE_IF_SET_IN_BITMAP (&diff, 0, bb_index, bi)
523 {
524 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
525 if (bb)
526 {
527 void *bb_info = df_get_bb_info (dflow, bb_index);
528 dflow->problem->free_bb_fun (bb, bb_info);
529 df_clear_bb_info (dflow, bb_index);
530 }
531 }
532 }
533 }
534
535 bitmap_clear (&diff);
536 }
537 else
538 {
539 /* This block of code is executed to change the focus from
540 the entire function to a subset. */
541 bitmap_head blocks_to_reset;
542 bool initialized = false;
543 int p;
544 for (p = 0; p < df->num_problems_defined; p++)
545 {
546 struct dataflow *dflow = df->problems_in_order[p];
547 if (dflow->optional_p && dflow->problem->reset_fun)
548 {
549 if (!initialized)
550 {
551 basic_block bb;
552 bitmap_initialize (&blocks_to_reset, &df_bitmap_obstack);
553 FOR_ALL_BB_FN (bb, cfun)
554 {
555 bitmap_set_bit (&blocks_to_reset, bb->index);
556 }
557 }
558 dflow->problem->reset_fun (&blocks_to_reset);
559 }
560 }
561 if (initialized)
562 bitmap_clear (&blocks_to_reset);
563
564 df->blocks_to_analyze = BITMAP_ALLOC (&df_bitmap_obstack);
565 }
566 bitmap_copy (df->blocks_to_analyze, blocks);
567 df->analyze_subset = true;
568 }
569 else
570 {
571 /* This block is executed to reset the focus to the entire
572 function. */
573 if (dump_file)
574 fprintf (dump_file, "clearing blocks_to_analyze\n");
575 if (df->blocks_to_analyze)
576 {
577 BITMAP_FREE (df->blocks_to_analyze);
578 df->blocks_to_analyze = NULL;
579 }
580 df->analyze_subset = false;
581 }
582
583 /* Setting the blocks causes the refs to be unorganized since only
584 the refs in the blocks are seen. */
585 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
586 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
587 df_mark_solutions_dirty ();
588 }
589
590
591 /* Delete a DFLOW problem (and any problems that depend on this
592 problem). */
593
594 void
595 df_remove_problem (struct dataflow *dflow)
596 {
597 struct df_problem *problem;
598 int i;
599
600 if (!dflow)
601 return;
602
603 problem = dflow->problem;
604 gcc_assert (problem->remove_problem_fun);
605
606 /* Delete any problems that depended on this problem first. */
607 for (i = 0; i < df->num_problems_defined; i++)
608 if (df->problems_in_order[i]->problem->dependent_problem == problem)
609 df_remove_problem (df->problems_in_order[i]);
610
611 /* Now remove this problem. */
612 for (i = 0; i < df->num_problems_defined; i++)
613 if (df->problems_in_order[i] == dflow)
614 {
615 int j;
616 for (j = i + 1; j < df->num_problems_defined; j++)
617 df->problems_in_order[j-1] = df->problems_in_order[j];
618 df->problems_in_order[j-1] = NULL;
619 df->num_problems_defined--;
620 break;
621 }
622
623 (problem->remove_problem_fun) ();
624 df->problems_by_index[problem->id] = NULL;
625 }
626
627
628 /* Remove all of the problems that are not permanent. Scanning, LR
629 and (at -O2 or higher) LIVE are permanent, the rest are removable.
630 Also clear all of the changeable_flags. */
631
632 void
633 df_finish_pass (bool verify ATTRIBUTE_UNUSED)
634 {
635 int i;
636 int removed = 0;
637
638 #ifdef ENABLE_DF_CHECKING
639 int saved_flags;
640 #endif
641
642 if (!df)
643 return;
644
645 df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
646 df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
647
648 #ifdef ENABLE_DF_CHECKING
649 saved_flags = df->changeable_flags;
650 #endif
651
652 for (i = 0; i < df->num_problems_defined; i++)
653 {
654 struct dataflow *dflow = df->problems_in_order[i];
655 struct df_problem *problem = dflow->problem;
656
657 if (dflow->optional_p)
658 {
659 gcc_assert (problem->remove_problem_fun);
660 (problem->remove_problem_fun) ();
661 df->problems_in_order[i] = NULL;
662 df->problems_by_index[problem->id] = NULL;
663 removed++;
664 }
665 }
666 df->num_problems_defined -= removed;
667
668 /* Clear all of the flags. */
669 df->changeable_flags = 0;
670 df_process_deferred_rescans ();
671
672 /* Set the focus back to the whole function. */
673 if (df->blocks_to_analyze)
674 {
675 BITMAP_FREE (df->blocks_to_analyze);
676 df->blocks_to_analyze = NULL;
677 df_mark_solutions_dirty ();
678 df->analyze_subset = false;
679 }
680
681 #ifdef ENABLE_DF_CHECKING
682 /* Verification will fail in DF_NO_INSN_RESCAN. */
683 if (!(saved_flags & DF_NO_INSN_RESCAN))
684 {
685 df_lr_verify_transfer_functions ();
686 if (df_live)
687 df_live_verify_transfer_functions ();
688 }
689
690 #ifdef DF_DEBUG_CFG
691 df_set_clean_cfg ();
692 #endif
693 #endif
694
695 #ifdef ENABLE_CHECKING
696 if (verify)
697 df->changeable_flags |= DF_VERIFY_SCHEDULED;
698 #endif
699 }
700
701
702 /* Set up the dataflow instance for the entire back end. */
703
704 static unsigned int
705 rest_of_handle_df_initialize (void)
706 {
707 gcc_assert (!df);
708 df = XCNEW (struct df_d);
709 df->changeable_flags = 0;
710
711 bitmap_obstack_initialize (&df_bitmap_obstack);
712
713 /* Set this to a conservative value. Stack_ptr_mod will compute it
714 correctly later. */
715 crtl->sp_is_unchanging = 0;
716
717 df_scan_add_problem ();
718 df_scan_alloc (NULL);
719
720 /* These three problems are permanent. */
721 df_lr_add_problem ();
722 if (optimize > 1)
723 df_live_add_problem ();
724
725 df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
726 df->postorder_inverted = XNEWVEC (int, last_basic_block_for_fn (cfun));
727 df->n_blocks = post_order_compute (df->postorder, true, true);
728 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
729 gcc_assert (df->n_blocks == df->n_blocks_inverted);
730
731 df->hard_regs_live_count = XCNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER);
732
733 df_hard_reg_init ();
734 /* After reload, some ports add certain bits to regs_ever_live so
735 this cannot be reset. */
736 df_compute_regs_ever_live (true);
737 df_scan_blocks ();
738 df_compute_regs_ever_live (false);
739 return 0;
740 }
741
742
743 namespace {
744
745 const pass_data pass_data_df_initialize_opt =
746 {
747 RTL_PASS, /* type */
748 "dfinit", /* name */
749 OPTGROUP_NONE, /* optinfo_flags */
750 true, /* has_execute */
751 TV_DF_SCAN, /* tv_id */
752 0, /* properties_required */
753 0, /* properties_provided */
754 0, /* properties_destroyed */
755 0, /* todo_flags_start */
756 0, /* todo_flags_finish */
757 };
758
759 class pass_df_initialize_opt : public rtl_opt_pass
760 {
761 public:
762 pass_df_initialize_opt (gcc::context *ctxt)
763 : rtl_opt_pass (pass_data_df_initialize_opt, ctxt)
764 {}
765
766 /* opt_pass methods: */
767 virtual bool gate (function *) { return optimize > 0; }
768 unsigned int execute () { return rest_of_handle_df_initialize (); }
769
770 }; // class pass_df_initialize_opt
771
772 } // anon namespace
773
774 rtl_opt_pass *
775 make_pass_df_initialize_opt (gcc::context *ctxt)
776 {
777 return new pass_df_initialize_opt (ctxt);
778 }
779
780
781 namespace {
782
783 const pass_data pass_data_df_initialize_no_opt =
784 {
785 RTL_PASS, /* type */
786 "no-opt dfinit", /* name */
787 OPTGROUP_NONE, /* optinfo_flags */
788 true, /* has_execute */
789 TV_DF_SCAN, /* tv_id */
790 0, /* properties_required */
791 0, /* properties_provided */
792 0, /* properties_destroyed */
793 0, /* todo_flags_start */
794 0, /* todo_flags_finish */
795 };
796
797 class pass_df_initialize_no_opt : public rtl_opt_pass
798 {
799 public:
800 pass_df_initialize_no_opt (gcc::context *ctxt)
801 : rtl_opt_pass (pass_data_df_initialize_no_opt, ctxt)
802 {}
803
804 /* opt_pass methods: */
805 virtual bool gate (function *) { return optimize == 0; }
806 unsigned int execute () { return rest_of_handle_df_initialize (); }
807
808 }; // class pass_df_initialize_no_opt
809
810 } // anon namespace
811
812 rtl_opt_pass *
813 make_pass_df_initialize_no_opt (gcc::context *ctxt)
814 {
815 return new pass_df_initialize_no_opt (ctxt);
816 }
817
818
819 /* Free all the dataflow info and the DF structure. This should be
820 called from the df_finish macro which also NULLs the parm. */
821
822 static unsigned int
823 rest_of_handle_df_finish (void)
824 {
825 int i;
826
827 gcc_assert (df);
828
829 for (i = 0; i < df->num_problems_defined; i++)
830 {
831 struct dataflow *dflow = df->problems_in_order[i];
832 dflow->problem->free_fun ();
833 }
834
835 free (df->postorder);
836 free (df->postorder_inverted);
837 free (df->hard_regs_live_count);
838 free (df);
839 df = NULL;
840
841 bitmap_obstack_release (&df_bitmap_obstack);
842 return 0;
843 }
844
845
846 namespace {
847
848 const pass_data pass_data_df_finish =
849 {
850 RTL_PASS, /* type */
851 "dfinish", /* name */
852 OPTGROUP_NONE, /* optinfo_flags */
853 true, /* has_execute */
854 TV_NONE, /* tv_id */
855 0, /* properties_required */
856 0, /* properties_provided */
857 0, /* properties_destroyed */
858 0, /* todo_flags_start */
859 0, /* todo_flags_finish */
860 };
861
862 class pass_df_finish : public rtl_opt_pass
863 {
864 public:
865 pass_df_finish (gcc::context *ctxt)
866 : rtl_opt_pass (pass_data_df_finish, ctxt)
867 {}
868
869 /* opt_pass methods: */
870 unsigned int execute () { return rest_of_handle_df_finish (); }
871
872 }; // class pass_df_finish
873
874 } // anon namespace
875
876 rtl_opt_pass *
877 make_pass_df_finish (gcc::context *ctxt)
878 {
879 return new pass_df_finish (ctxt);
880 }
881
882
883
884
885 \f
886 /*----------------------------------------------------------------------------
887 The general data flow analysis engine.
888 ----------------------------------------------------------------------------*/
889
890 /* Return time BB when it was visited for last time. */
891 #define BB_LAST_CHANGE_AGE(bb) ((ptrdiff_t)(bb)->aux)
892
893 /* Helper function for df_worklist_dataflow.
894 Propagate the dataflow forward.
895 Given a BB_INDEX, do the dataflow propagation
896 and set bits on for successors in PENDING
897 if the out set of the dataflow has changed.
898
899 AGE specify time when BB was visited last time.
900 AGE of 0 means we are visiting for first time and need to
901 compute transfer function to initialize datastructures.
902 Otherwise we re-do transfer function only if something change
903 while computing confluence functions.
904 We need to compute confluence only of basic block that are younger
905 then last visit of the BB.
906
907 Return true if BB info has changed. This is always the case
908 in the first visit. */
909
910 static bool
911 df_worklist_propagate_forward (struct dataflow *dataflow,
912 unsigned bb_index,
913 unsigned *bbindex_to_postorder,
914 bitmap pending,
915 sbitmap considered,
916 ptrdiff_t age)
917 {
918 edge e;
919 edge_iterator ei;
920 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
921 bool changed = !age;
922
923 /* Calculate <conf_op> of incoming edges. */
924 if (EDGE_COUNT (bb->preds) > 0)
925 FOR_EACH_EDGE (e, ei, bb->preds)
926 {
927 if (age <= BB_LAST_CHANGE_AGE (e->src)
928 && bitmap_bit_p (considered, e->src->index))
929 changed |= dataflow->problem->con_fun_n (e);
930 }
931 else if (dataflow->problem->con_fun_0)
932 dataflow->problem->con_fun_0 (bb);
933
934 if (changed
935 && dataflow->problem->trans_fun (bb_index))
936 {
937 /* The out set of this block has changed.
938 Propagate to the outgoing blocks. */
939 FOR_EACH_EDGE (e, ei, bb->succs)
940 {
941 unsigned ob_index = e->dest->index;
942
943 if (bitmap_bit_p (considered, ob_index))
944 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
945 }
946 return true;
947 }
948 return false;
949 }
950
951
952 /* Helper function for df_worklist_dataflow.
953 Propagate the dataflow backward. */
954
955 static bool
956 df_worklist_propagate_backward (struct dataflow *dataflow,
957 unsigned bb_index,
958 unsigned *bbindex_to_postorder,
959 bitmap pending,
960 sbitmap considered,
961 ptrdiff_t age)
962 {
963 edge e;
964 edge_iterator ei;
965 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
966 bool changed = !age;
967
968 /* Calculate <conf_op> of incoming edges. */
969 if (EDGE_COUNT (bb->succs) > 0)
970 FOR_EACH_EDGE (e, ei, bb->succs)
971 {
972 if (age <= BB_LAST_CHANGE_AGE (e->dest)
973 && bitmap_bit_p (considered, e->dest->index))
974 changed |= dataflow->problem->con_fun_n (e);
975 }
976 else if (dataflow->problem->con_fun_0)
977 dataflow->problem->con_fun_0 (bb);
978
979 if (changed
980 && dataflow->problem->trans_fun (bb_index))
981 {
982 /* The out set of this block has changed.
983 Propagate to the outgoing blocks. */
984 FOR_EACH_EDGE (e, ei, bb->preds)
985 {
986 unsigned ob_index = e->src->index;
987
988 if (bitmap_bit_p (considered, ob_index))
989 bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
990 }
991 return true;
992 }
993 return false;
994 }
995
996 /* Main dataflow solver loop.
997
998 DATAFLOW is problem we are solving, PENDING is worklist of basic blocks we
999 need to visit.
1000 BLOCK_IN_POSTORDER is array of size N_BLOCKS specifying postorder in BBs and
1001 BBINDEX_TO_POSTORDER is array mapping back BB->index to postorder position.
1002 PENDING will be freed.
1003
1004 The worklists are bitmaps indexed by postorder positions.
1005
1006 The function implements standard algorithm for dataflow solving with two
1007 worklists (we are processing WORKLIST and storing new BBs to visit in
1008 PENDING).
1009
1010 As an optimization we maintain ages when BB was changed (stored in bb->aux)
1011 and when it was last visited (stored in last_visit_age). This avoids need
1012 to re-do confluence function for edges to basic blocks whose source
1013 did not change since destination was visited last time. */
1014
1015 static void
1016 df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
1017 bitmap pending,
1018 sbitmap considered,
1019 int *blocks_in_postorder,
1020 unsigned *bbindex_to_postorder,
1021 int n_blocks)
1022 {
1023 enum df_flow_dir dir = dataflow->problem->dir;
1024 int dcount = 0;
1025 bitmap worklist = BITMAP_ALLOC (&df_bitmap_obstack);
1026 int age = 0;
1027 bool changed;
1028 vec<int> last_visit_age = vNULL;
1029 int prev_age;
1030 basic_block bb;
1031 int i;
1032
1033 last_visit_age.safe_grow_cleared (n_blocks);
1034
1035 /* Double-queueing. Worklist is for the current iteration,
1036 and pending is for the next. */
1037 while (!bitmap_empty_p (pending))
1038 {
1039 bitmap_iterator bi;
1040 unsigned int index;
1041
1042 /* Swap pending and worklist. */
1043 bitmap temp = worklist;
1044 worklist = pending;
1045 pending = temp;
1046
1047 EXECUTE_IF_SET_IN_BITMAP (worklist, 0, index, bi)
1048 {
1049 unsigned bb_index;
1050 dcount++;
1051
1052 bitmap_clear_bit (pending, index);
1053 bb_index = blocks_in_postorder[index];
1054 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
1055 prev_age = last_visit_age[index];
1056 if (dir == DF_FORWARD)
1057 changed = df_worklist_propagate_forward (dataflow, bb_index,
1058 bbindex_to_postorder,
1059 pending, considered,
1060 prev_age);
1061 else
1062 changed = df_worklist_propagate_backward (dataflow, bb_index,
1063 bbindex_to_postorder,
1064 pending, considered,
1065 prev_age);
1066 last_visit_age[index] = ++age;
1067 if (changed)
1068 bb->aux = (void *)(ptrdiff_t)age;
1069 }
1070 bitmap_clear (worklist);
1071 }
1072 for (i = 0; i < n_blocks; i++)
1073 BASIC_BLOCK_FOR_FN (cfun, blocks_in_postorder[i])->aux = NULL;
1074
1075 BITMAP_FREE (worklist);
1076 BITMAP_FREE (pending);
1077 last_visit_age.release ();
1078
1079 /* Dump statistics. */
1080 if (dump_file)
1081 fprintf (dump_file, "df_worklist_dataflow_doublequeue:"
1082 "n_basic_blocks %d n_edges %d"
1083 " count %d (%5.2g)\n",
1084 n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun),
1085 dcount, dcount / (float)n_basic_blocks_for_fn (cfun));
1086 }
1087
1088 /* Worklist-based dataflow solver. It uses sbitmap as a worklist,
1089 with "n"-th bit representing the n-th block in the reverse-postorder order.
1090 The solver is a double-queue algorithm similar to the "double stack" solver
1091 from Cooper, Harvey and Kennedy, "Iterative data-flow analysis, Revisited".
1092 The only significant difference is that the worklist in this implementation
1093 is always sorted in RPO of the CFG visiting direction. */
1094
1095 void
1096 df_worklist_dataflow (struct dataflow *dataflow,
1097 bitmap blocks_to_consider,
1098 int *blocks_in_postorder,
1099 int n_blocks)
1100 {
1101 bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
1102 sbitmap considered = sbitmap_alloc (last_basic_block_for_fn (cfun));
1103 bitmap_iterator bi;
1104 unsigned int *bbindex_to_postorder;
1105 int i;
1106 unsigned int index;
1107 enum df_flow_dir dir = dataflow->problem->dir;
1108
1109 gcc_assert (dir != DF_NONE);
1110
1111 /* BBINDEX_TO_POSTORDER maps the bb->index to the reverse postorder. */
1112 bbindex_to_postorder = XNEWVEC (unsigned int,
1113 last_basic_block_for_fn (cfun));
1114
1115 /* Initialize the array to an out-of-bound value. */
1116 for (i = 0; i < last_basic_block_for_fn (cfun); i++)
1117 bbindex_to_postorder[i] = last_basic_block_for_fn (cfun);
1118
1119 /* Initialize the considered map. */
1120 bitmap_clear (considered);
1121 EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, index, bi)
1122 {
1123 bitmap_set_bit (considered, index);
1124 }
1125
1126 /* Initialize the mapping of block index to postorder. */
1127 for (i = 0; i < n_blocks; i++)
1128 {
1129 bbindex_to_postorder[blocks_in_postorder[i]] = i;
1130 /* Add all blocks to the worklist. */
1131 bitmap_set_bit (pending, i);
1132 }
1133
1134 /* Initialize the problem. */
1135 if (dataflow->problem->init_fun)
1136 dataflow->problem->init_fun (blocks_to_consider);
1137
1138 /* Solve it. */
1139 df_worklist_dataflow_doublequeue (dataflow, pending, considered,
1140 blocks_in_postorder,
1141 bbindex_to_postorder,
1142 n_blocks);
1143 sbitmap_free (considered);
1144 free (bbindex_to_postorder);
1145 }
1146
1147
1148 /* Remove the entries not in BLOCKS from the LIST of length LEN, preserving
1149 the order of the remaining entries. Returns the length of the resulting
1150 list. */
1151
1152 static unsigned
1153 df_prune_to_subcfg (int list[], unsigned len, bitmap blocks)
1154 {
1155 unsigned act, last;
1156
1157 for (act = 0, last = 0; act < len; act++)
1158 if (bitmap_bit_p (blocks, list[act]))
1159 list[last++] = list[act];
1160
1161 return last;
1162 }
1163
1164
1165 /* Execute dataflow analysis on a single dataflow problem.
1166
1167 BLOCKS_TO_CONSIDER are the blocks whose solution can either be
1168 examined or will be computed. For calls from DF_ANALYZE, this is
1169 the set of blocks that has been passed to DF_SET_BLOCKS.
1170 */
1171
1172 void
1173 df_analyze_problem (struct dataflow *dflow,
1174 bitmap blocks_to_consider,
1175 int *postorder, int n_blocks)
1176 {
1177 timevar_push (dflow->problem->tv_id);
1178
1179 /* (Re)Allocate the datastructures necessary to solve the problem. */
1180 if (dflow->problem->alloc_fun)
1181 dflow->problem->alloc_fun (blocks_to_consider);
1182
1183 #ifdef ENABLE_DF_CHECKING
1184 if (dflow->problem->verify_start_fun)
1185 dflow->problem->verify_start_fun ();
1186 #endif
1187
1188 /* Set up the problem and compute the local information. */
1189 if (dflow->problem->local_compute_fun)
1190 dflow->problem->local_compute_fun (blocks_to_consider);
1191
1192 /* Solve the equations. */
1193 if (dflow->problem->dataflow_fun)
1194 dflow->problem->dataflow_fun (dflow, blocks_to_consider,
1195 postorder, n_blocks);
1196
1197 /* Massage the solution. */
1198 if (dflow->problem->finalize_fun)
1199 dflow->problem->finalize_fun (blocks_to_consider);
1200
1201 #ifdef ENABLE_DF_CHECKING
1202 if (dflow->problem->verify_end_fun)
1203 dflow->problem->verify_end_fun ();
1204 #endif
1205
1206 timevar_pop (dflow->problem->tv_id);
1207
1208 dflow->computed = true;
1209 }
1210
1211
1212 /* Analyze dataflow info. */
1213
1214 static void
1215 df_analyze_1 (void)
1216 {
1217 int i;
1218
1219 /* These should be the same. */
1220 gcc_assert (df->n_blocks == df->n_blocks_inverted);
1221
1222 /* We need to do this before the df_verify_all because this is
1223 not kept incrementally up to date. */
1224 df_compute_regs_ever_live (false);
1225 df_process_deferred_rescans ();
1226
1227 if (dump_file)
1228 fprintf (dump_file, "df_analyze called\n");
1229
1230 #ifndef ENABLE_DF_CHECKING
1231 if (df->changeable_flags & DF_VERIFY_SCHEDULED)
1232 #endif
1233 df_verify ();
1234
1235 /* Skip over the DF_SCAN problem. */
1236 for (i = 1; i < df->num_problems_defined; i++)
1237 {
1238 struct dataflow *dflow = df->problems_in_order[i];
1239 if (dflow->solutions_dirty)
1240 {
1241 if (dflow->problem->dir == DF_FORWARD)
1242 df_analyze_problem (dflow,
1243 df->blocks_to_analyze,
1244 df->postorder_inverted,
1245 df->n_blocks_inverted);
1246 else
1247 df_analyze_problem (dflow,
1248 df->blocks_to_analyze,
1249 df->postorder,
1250 df->n_blocks);
1251 }
1252 }
1253
1254 if (!df->analyze_subset)
1255 {
1256 BITMAP_FREE (df->blocks_to_analyze);
1257 df->blocks_to_analyze = NULL;
1258 }
1259
1260 #ifdef DF_DEBUG_CFG
1261 df_set_clean_cfg ();
1262 #endif
1263 }
1264
1265 /* Analyze dataflow info. */
1266
1267 void
1268 df_analyze (void)
1269 {
1270 bitmap current_all_blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1271 int i;
1272
1273 free (df->postorder);
1274 free (df->postorder_inverted);
1275 df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
1276 df->postorder_inverted = XNEWVEC (int, last_basic_block_for_fn (cfun));
1277 df->n_blocks = post_order_compute (df->postorder, true, true);
1278 df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
1279
1280 for (i = 0; i < df->n_blocks; i++)
1281 bitmap_set_bit (current_all_blocks, df->postorder[i]);
1282
1283 #ifdef ENABLE_CHECKING
1284 /* Verify that POSTORDER_INVERTED only contains blocks reachable from
1285 the ENTRY block. */
1286 for (i = 0; i < df->n_blocks_inverted; i++)
1287 gcc_assert (bitmap_bit_p (current_all_blocks, df->postorder_inverted[i]));
1288 #endif
1289
1290 /* Make sure that we have pruned any unreachable blocks from these
1291 sets. */
1292 if (df->analyze_subset)
1293 {
1294 bitmap_and_into (df->blocks_to_analyze, current_all_blocks);
1295 df->n_blocks = df_prune_to_subcfg (df->postorder,
1296 df->n_blocks, df->blocks_to_analyze);
1297 df->n_blocks_inverted = df_prune_to_subcfg (df->postorder_inverted,
1298 df->n_blocks_inverted,
1299 df->blocks_to_analyze);
1300 BITMAP_FREE (current_all_blocks);
1301 }
1302 else
1303 {
1304 df->blocks_to_analyze = current_all_blocks;
1305 current_all_blocks = NULL;
1306 }
1307
1308 df_analyze_1 ();
1309 }
1310
1311 /* Compute the reverse top sort order of the sub-CFG specified by LOOP.
1312 Returns the number of blocks which is always loop->num_nodes. */
1313
1314 static int
1315 loop_post_order_compute (int *post_order, struct loop *loop)
1316 {
1317 edge_iterator *stack;
1318 int sp;
1319 int post_order_num = 0;
1320 bitmap visited;
1321
1322 /* Allocate stack for back-tracking up CFG. */
1323 stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
1324 sp = 0;
1325
1326 /* Allocate bitmap to track nodes that have been visited. */
1327 visited = BITMAP_ALLOC (NULL);
1328
1329 /* Push the first edge on to the stack. */
1330 stack[sp++] = ei_start (loop_preheader_edge (loop)->src->succs);
1331
1332 while (sp)
1333 {
1334 edge_iterator ei;
1335 basic_block src;
1336 basic_block dest;
1337
1338 /* Look at the edge on the top of the stack. */
1339 ei = stack[sp - 1];
1340 src = ei_edge (ei)->src;
1341 dest = ei_edge (ei)->dest;
1342
1343 /* Check if the edge destination has been visited yet and mark it
1344 if not so. */
1345 if (flow_bb_inside_loop_p (loop, dest)
1346 && bitmap_set_bit (visited, dest->index))
1347 {
1348 if (EDGE_COUNT (dest->succs) > 0)
1349 /* Since the DEST node has been visited for the first
1350 time, check its successors. */
1351 stack[sp++] = ei_start (dest->succs);
1352 else
1353 post_order[post_order_num++] = dest->index;
1354 }
1355 else
1356 {
1357 if (ei_one_before_end_p (ei)
1358 && src != loop_preheader_edge (loop)->src)
1359 post_order[post_order_num++] = src->index;
1360
1361 if (!ei_one_before_end_p (ei))
1362 ei_next (&stack[sp - 1]);
1363 else
1364 sp--;
1365 }
1366 }
1367
1368 free (stack);
1369 BITMAP_FREE (visited);
1370
1371 return post_order_num;
1372 }
1373
1374 /* Compute the reverse top sort order of the inverted sub-CFG specified
1375 by LOOP. Returns the number of blocks which is always loop->num_nodes. */
1376
1377 static int
1378 loop_inverted_post_order_compute (int *post_order, struct loop *loop)
1379 {
1380 basic_block bb;
1381 edge_iterator *stack;
1382 int sp;
1383 int post_order_num = 0;
1384 bitmap visited;
1385
1386 /* Allocate stack for back-tracking up CFG. */
1387 stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
1388 sp = 0;
1389
1390 /* Allocate bitmap to track nodes that have been visited. */
1391 visited = BITMAP_ALLOC (NULL);
1392
1393 /* Put all latches into the initial work list. In theory we'd want
1394 to start from loop exits but then we'd have the special case of
1395 endless loops. It doesn't really matter for DF iteration order and
1396 handling latches last is probably even better. */
1397 stack[sp++] = ei_start (loop->header->preds);
1398 bitmap_set_bit (visited, loop->header->index);
1399
1400 /* The inverted traversal loop. */
1401 while (sp)
1402 {
1403 edge_iterator ei;
1404 basic_block pred;
1405
1406 /* Look at the edge on the top of the stack. */
1407 ei = stack[sp - 1];
1408 bb = ei_edge (ei)->dest;
1409 pred = ei_edge (ei)->src;
1410
1411 /* Check if the predecessor has been visited yet and mark it
1412 if not so. */
1413 if (flow_bb_inside_loop_p (loop, pred)
1414 && bitmap_set_bit (visited, pred->index))
1415 {
1416 if (EDGE_COUNT (pred->preds) > 0)
1417 /* Since the predecessor node has been visited for the first
1418 time, check its predecessors. */
1419 stack[sp++] = ei_start (pred->preds);
1420 else
1421 post_order[post_order_num++] = pred->index;
1422 }
1423 else
1424 {
1425 if (flow_bb_inside_loop_p (loop, bb)
1426 && ei_one_before_end_p (ei))
1427 post_order[post_order_num++] = bb->index;
1428
1429 if (!ei_one_before_end_p (ei))
1430 ei_next (&stack[sp - 1]);
1431 else
1432 sp--;
1433 }
1434 }
1435
1436 free (stack);
1437 BITMAP_FREE (visited);
1438 return post_order_num;
1439 }
1440
1441
1442 /* Analyze dataflow info for the basic blocks contained in LOOP. */
1443
1444 void
1445 df_analyze_loop (struct loop *loop)
1446 {
1447 free (df->postorder);
1448 free (df->postorder_inverted);
1449
1450 df->postorder = XNEWVEC (int, loop->num_nodes);
1451 df->postorder_inverted = XNEWVEC (int, loop->num_nodes);
1452 df->n_blocks = loop_post_order_compute (df->postorder, loop);
1453 df->n_blocks_inverted
1454 = loop_inverted_post_order_compute (df->postorder_inverted, loop);
1455 gcc_assert ((unsigned) df->n_blocks == loop->num_nodes);
1456 gcc_assert ((unsigned) df->n_blocks_inverted == loop->num_nodes);
1457
1458 bitmap blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1459 for (int i = 0; i < df->n_blocks; ++i)
1460 bitmap_set_bit (blocks, df->postorder[i]);
1461 df_set_blocks (blocks);
1462 BITMAP_FREE (blocks);
1463
1464 df_analyze_1 ();
1465 }
1466
1467
1468 /* Return the number of basic blocks from the last call to df_analyze. */
1469
1470 int
1471 df_get_n_blocks (enum df_flow_dir dir)
1472 {
1473 gcc_assert (dir != DF_NONE);
1474
1475 if (dir == DF_FORWARD)
1476 {
1477 gcc_assert (df->postorder_inverted);
1478 return df->n_blocks_inverted;
1479 }
1480
1481 gcc_assert (df->postorder);
1482 return df->n_blocks;
1483 }
1484
1485
1486 /* Return a pointer to the array of basic blocks in the reverse postorder.
1487 Depending on the direction of the dataflow problem,
1488 it returns either the usual reverse postorder array
1489 or the reverse postorder of inverted traversal. */
1490 int *
1491 df_get_postorder (enum df_flow_dir dir)
1492 {
1493 gcc_assert (dir != DF_NONE);
1494
1495 if (dir == DF_FORWARD)
1496 {
1497 gcc_assert (df->postorder_inverted);
1498 return df->postorder_inverted;
1499 }
1500 gcc_assert (df->postorder);
1501 return df->postorder;
1502 }
1503
1504 static struct df_problem user_problem;
1505 static struct dataflow user_dflow;
1506
1507 /* Interface for calling iterative dataflow with user defined
1508 confluence and transfer functions. All that is necessary is to
1509 supply DIR, a direction, CONF_FUN_0, a confluence function for
1510 blocks with no logical preds (or NULL), CONF_FUN_N, the normal
1511 confluence function, TRANS_FUN, the basic block transfer function,
1512 and BLOCKS, the set of blocks to examine, POSTORDER the blocks in
1513 postorder, and N_BLOCKS, the number of blocks in POSTORDER. */
1514
1515 void
1516 df_simple_dataflow (enum df_flow_dir dir,
1517 df_init_function init_fun,
1518 df_confluence_function_0 con_fun_0,
1519 df_confluence_function_n con_fun_n,
1520 df_transfer_function trans_fun,
1521 bitmap blocks, int * postorder, int n_blocks)
1522 {
1523 memset (&user_problem, 0, sizeof (struct df_problem));
1524 user_problem.dir = dir;
1525 user_problem.init_fun = init_fun;
1526 user_problem.con_fun_0 = con_fun_0;
1527 user_problem.con_fun_n = con_fun_n;
1528 user_problem.trans_fun = trans_fun;
1529 user_dflow.problem = &user_problem;
1530 df_worklist_dataflow (&user_dflow, blocks, postorder, n_blocks);
1531 }
1532
1533
1534 \f
1535 /*----------------------------------------------------------------------------
1536 Functions to support limited incremental change.
1537 ----------------------------------------------------------------------------*/
1538
1539
1540 /* Get basic block info. */
1541
1542 static void *
1543 df_get_bb_info (struct dataflow *dflow, unsigned int index)
1544 {
1545 if (dflow->block_info == NULL)
1546 return NULL;
1547 if (index >= dflow->block_info_size)
1548 return NULL;
1549 return (void *)((char *)dflow->block_info
1550 + index * dflow->problem->block_info_elt_size);
1551 }
1552
1553
1554 /* Set basic block info. */
1555
1556 static void
1557 df_set_bb_info (struct dataflow *dflow, unsigned int index,
1558 void *bb_info)
1559 {
1560 gcc_assert (dflow->block_info);
1561 memcpy ((char *)dflow->block_info
1562 + index * dflow->problem->block_info_elt_size,
1563 bb_info, dflow->problem->block_info_elt_size);
1564 }
1565
1566
1567 /* Clear basic block info. */
1568
1569 static void
1570 df_clear_bb_info (struct dataflow *dflow, unsigned int index)
1571 {
1572 gcc_assert (dflow->block_info);
1573 gcc_assert (dflow->block_info_size > index);
1574 memset ((char *)dflow->block_info
1575 + index * dflow->problem->block_info_elt_size,
1576 0, dflow->problem->block_info_elt_size);
1577 }
1578
1579
1580 /* Mark the solutions as being out of date. */
1581
1582 void
1583 df_mark_solutions_dirty (void)
1584 {
1585 if (df)
1586 {
1587 int p;
1588 for (p = 1; p < df->num_problems_defined; p++)
1589 df->problems_in_order[p]->solutions_dirty = true;
1590 }
1591 }
1592
1593
1594 /* Return true if BB needs it's transfer functions recomputed. */
1595
1596 bool
1597 df_get_bb_dirty (basic_block bb)
1598 {
1599 return bitmap_bit_p ((df_live
1600 ? df_live : df_lr)->out_of_date_transfer_functions,
1601 bb->index);
1602 }
1603
1604
1605 /* Mark BB as needing it's transfer functions as being out of
1606 date. */
1607
1608 void
1609 df_set_bb_dirty (basic_block bb)
1610 {
1611 bb->flags |= BB_MODIFIED;
1612 if (df)
1613 {
1614 int p;
1615 for (p = 1; p < df->num_problems_defined; p++)
1616 {
1617 struct dataflow *dflow = df->problems_in_order[p];
1618 if (dflow->out_of_date_transfer_functions)
1619 bitmap_set_bit (dflow->out_of_date_transfer_functions, bb->index);
1620 }
1621 df_mark_solutions_dirty ();
1622 }
1623 }
1624
1625
1626 /* Grow the bb_info array. */
1627
1628 void
1629 df_grow_bb_info (struct dataflow *dflow)
1630 {
1631 unsigned int new_size = last_basic_block_for_fn (cfun) + 1;
1632 if (dflow->block_info_size < new_size)
1633 {
1634 new_size += new_size / 4;
1635 dflow->block_info
1636 = (void *)XRESIZEVEC (char, (char *)dflow->block_info,
1637 new_size
1638 * dflow->problem->block_info_elt_size);
1639 memset ((char *)dflow->block_info
1640 + dflow->block_info_size
1641 * dflow->problem->block_info_elt_size,
1642 0,
1643 (new_size - dflow->block_info_size)
1644 * dflow->problem->block_info_elt_size);
1645 dflow->block_info_size = new_size;
1646 }
1647 }
1648
1649
1650 /* Clear the dirty bits. This is called from places that delete
1651 blocks. */
1652 static void
1653 df_clear_bb_dirty (basic_block bb)
1654 {
1655 int p;
1656 for (p = 1; p < df->num_problems_defined; p++)
1657 {
1658 struct dataflow *dflow = df->problems_in_order[p];
1659 if (dflow->out_of_date_transfer_functions)
1660 bitmap_clear_bit (dflow->out_of_date_transfer_functions, bb->index);
1661 }
1662 }
1663
1664 /* Called from the rtl_compact_blocks to reorganize the problems basic
1665 block info. */
1666
1667 void
1668 df_compact_blocks (void)
1669 {
1670 int i, p;
1671 basic_block bb;
1672 void *problem_temps;
1673 bitmap_head tmp;
1674
1675 bitmap_initialize (&tmp, &df_bitmap_obstack);
1676 for (p = 0; p < df->num_problems_defined; p++)
1677 {
1678 struct dataflow *dflow = df->problems_in_order[p];
1679
1680 /* Need to reorganize the out_of_date_transfer_functions for the
1681 dflow problem. */
1682 if (dflow->out_of_date_transfer_functions)
1683 {
1684 bitmap_copy (&tmp, dflow->out_of_date_transfer_functions);
1685 bitmap_clear (dflow->out_of_date_transfer_functions);
1686 if (bitmap_bit_p (&tmp, ENTRY_BLOCK))
1687 bitmap_set_bit (dflow->out_of_date_transfer_functions, ENTRY_BLOCK);
1688 if (bitmap_bit_p (&tmp, EXIT_BLOCK))
1689 bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
1690
1691 i = NUM_FIXED_BLOCKS;
1692 FOR_EACH_BB_FN (bb, cfun)
1693 {
1694 if (bitmap_bit_p (&tmp, bb->index))
1695 bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
1696 i++;
1697 }
1698 }
1699
1700 /* Now shuffle the block info for the problem. */
1701 if (dflow->problem->free_bb_fun)
1702 {
1703 int size = (last_basic_block_for_fn (cfun)
1704 * dflow->problem->block_info_elt_size);
1705 problem_temps = XNEWVAR (char, size);
1706 df_grow_bb_info (dflow);
1707 memcpy (problem_temps, dflow->block_info, size);
1708
1709 /* Copy the bb info from the problem tmps to the proper
1710 place in the block_info vector. Null out the copied
1711 item. The entry and exit blocks never move. */
1712 i = NUM_FIXED_BLOCKS;
1713 FOR_EACH_BB_FN (bb, cfun)
1714 {
1715 df_set_bb_info (dflow, i,
1716 (char *)problem_temps
1717 + bb->index * dflow->problem->block_info_elt_size);
1718 i++;
1719 }
1720 memset ((char *)dflow->block_info
1721 + i * dflow->problem->block_info_elt_size, 0,
1722 (last_basic_block_for_fn (cfun) - i)
1723 * dflow->problem->block_info_elt_size);
1724 free (problem_temps);
1725 }
1726 }
1727
1728 /* Shuffle the bits in the basic_block indexed arrays. */
1729
1730 if (df->blocks_to_analyze)
1731 {
1732 if (bitmap_bit_p (&tmp, ENTRY_BLOCK))
1733 bitmap_set_bit (df->blocks_to_analyze, ENTRY_BLOCK);
1734 if (bitmap_bit_p (&tmp, EXIT_BLOCK))
1735 bitmap_set_bit (df->blocks_to_analyze, EXIT_BLOCK);
1736 bitmap_copy (&tmp, df->blocks_to_analyze);
1737 bitmap_clear (df->blocks_to_analyze);
1738 i = NUM_FIXED_BLOCKS;
1739 FOR_EACH_BB_FN (bb, cfun)
1740 {
1741 if (bitmap_bit_p (&tmp, bb->index))
1742 bitmap_set_bit (df->blocks_to_analyze, i);
1743 i++;
1744 }
1745 }
1746
1747 bitmap_clear (&tmp);
1748
1749 i = NUM_FIXED_BLOCKS;
1750 FOR_EACH_BB_FN (bb, cfun)
1751 {
1752 SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
1753 bb->index = i;
1754 i++;
1755 }
1756
1757 gcc_assert (i == n_basic_blocks_for_fn (cfun));
1758
1759 for (; i < last_basic_block_for_fn (cfun); i++)
1760 SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
1761
1762 #ifdef DF_DEBUG_CFG
1763 if (!df_lr->solutions_dirty)
1764 df_set_clean_cfg ();
1765 #endif
1766 }
1767
1768
1769 /* Shove NEW_BLOCK in at OLD_INDEX. Called from ifcvt to hack a
1770 block. There is no excuse for people to do this kind of thing. */
1771
1772 void
1773 df_bb_replace (int old_index, basic_block new_block)
1774 {
1775 int new_block_index = new_block->index;
1776 int p;
1777
1778 if (dump_file)
1779 fprintf (dump_file, "shoving block %d into %d\n", new_block_index, old_index);
1780
1781 gcc_assert (df);
1782 gcc_assert (BASIC_BLOCK_FOR_FN (cfun, old_index) == NULL);
1783
1784 for (p = 0; p < df->num_problems_defined; p++)
1785 {
1786 struct dataflow *dflow = df->problems_in_order[p];
1787 if (dflow->block_info)
1788 {
1789 df_grow_bb_info (dflow);
1790 df_set_bb_info (dflow, old_index,
1791 df_get_bb_info (dflow, new_block_index));
1792 }
1793 }
1794
1795 df_clear_bb_dirty (new_block);
1796 SET_BASIC_BLOCK_FOR_FN (cfun, old_index, new_block);
1797 new_block->index = old_index;
1798 df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, old_index));
1799 SET_BASIC_BLOCK_FOR_FN (cfun, new_block_index, NULL);
1800 }
1801
1802
1803 /* Free all of the per basic block dataflow from all of the problems.
1804 This is typically called before a basic block is deleted and the
1805 problem will be reanalyzed. */
1806
1807 void
1808 df_bb_delete (int bb_index)
1809 {
1810 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
1811 int i;
1812
1813 if (!df)
1814 return;
1815
1816 for (i = 0; i < df->num_problems_defined; i++)
1817 {
1818 struct dataflow *dflow = df->problems_in_order[i];
1819 if (dflow->problem->free_bb_fun)
1820 {
1821 void *bb_info = df_get_bb_info (dflow, bb_index);
1822 if (bb_info)
1823 {
1824 dflow->problem->free_bb_fun (bb, bb_info);
1825 df_clear_bb_info (dflow, bb_index);
1826 }
1827 }
1828 }
1829 df_clear_bb_dirty (bb);
1830 df_mark_solutions_dirty ();
1831 }
1832
1833
1834 /* Verify that there is a place for everything and everything is in
1835 its place. This is too expensive to run after every pass in the
1836 mainline. However this is an excellent debugging tool if the
1837 dataflow information is not being updated properly. You can just
1838 sprinkle calls in until you find the place that is changing an
1839 underlying structure without calling the proper updating
1840 routine. */
1841
1842 void
1843 df_verify (void)
1844 {
1845 df_scan_verify ();
1846 #ifdef ENABLE_DF_CHECKING
1847 df_lr_verify_transfer_functions ();
1848 if (df_live)
1849 df_live_verify_transfer_functions ();
1850 #endif
1851 }
1852
1853 #ifdef DF_DEBUG_CFG
1854
1855 /* Compute an array of ints that describes the cfg. This can be used
1856 to discover places where the cfg is modified by the appropriate
1857 calls have not been made to the keep df informed. The internals of
1858 this are unexciting, the key is that two instances of this can be
1859 compared to see if any changes have been made to the cfg. */
1860
1861 static int *
1862 df_compute_cfg_image (void)
1863 {
1864 basic_block bb;
1865 int size = 2 + (2 * n_basic_blocks_for_fn (cfun));
1866 int i;
1867 int * map;
1868
1869 FOR_ALL_BB_FN (bb, cfun)
1870 {
1871 size += EDGE_COUNT (bb->succs);
1872 }
1873
1874 map = XNEWVEC (int, size);
1875 map[0] = size;
1876 i = 1;
1877 FOR_ALL_BB_FN (bb, cfun)
1878 {
1879 edge_iterator ei;
1880 edge e;
1881
1882 map[i++] = bb->index;
1883 FOR_EACH_EDGE (e, ei, bb->succs)
1884 map[i++] = e->dest->index;
1885 map[i++] = -1;
1886 }
1887 map[i] = -1;
1888 return map;
1889 }
1890
1891 static int *saved_cfg = NULL;
1892
1893
1894 /* This function compares the saved version of the cfg with the
1895 current cfg and aborts if the two are identical. The function
1896 silently returns if the cfg has been marked as dirty or the two are
1897 the same. */
1898
1899 void
1900 df_check_cfg_clean (void)
1901 {
1902 int *new_map;
1903
1904 if (!df)
1905 return;
1906
1907 if (df_lr->solutions_dirty)
1908 return;
1909
1910 if (saved_cfg == NULL)
1911 return;
1912
1913 new_map = df_compute_cfg_image ();
1914 gcc_assert (memcmp (saved_cfg, new_map, saved_cfg[0] * sizeof (int)) == 0);
1915 free (new_map);
1916 }
1917
1918
1919 /* This function builds a cfg fingerprint and squirrels it away in
1920 saved_cfg. */
1921
1922 static void
1923 df_set_clean_cfg (void)
1924 {
1925 free (saved_cfg);
1926 saved_cfg = df_compute_cfg_image ();
1927 }
1928
1929 #endif /* DF_DEBUG_CFG */
1930 /*----------------------------------------------------------------------------
1931 PUBLIC INTERFACES TO QUERY INFORMATION.
1932 ----------------------------------------------------------------------------*/
1933
1934
1935 /* Return first def of REGNO within BB. */
1936
1937 df_ref
1938 df_bb_regno_first_def_find (basic_block bb, unsigned int regno)
1939 {
1940 rtx insn;
1941 df_ref *def_rec;
1942 unsigned int uid;
1943
1944 FOR_BB_INSNS (bb, insn)
1945 {
1946 if (!INSN_P (insn))
1947 continue;
1948
1949 uid = INSN_UID (insn);
1950 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1951 {
1952 df_ref def = *def_rec;
1953 if (DF_REF_REGNO (def) == regno)
1954 return def;
1955 }
1956 }
1957 return NULL;
1958 }
1959
1960
1961 /* Return last def of REGNO within BB. */
1962
1963 df_ref
1964 df_bb_regno_last_def_find (basic_block bb, unsigned int regno)
1965 {
1966 rtx insn;
1967 df_ref *def_rec;
1968 unsigned int uid;
1969
1970 FOR_BB_INSNS_REVERSE (bb, insn)
1971 {
1972 if (!INSN_P (insn))
1973 continue;
1974
1975 uid = INSN_UID (insn);
1976 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
1977 {
1978 df_ref def = *def_rec;
1979 if (DF_REF_REGNO (def) == regno)
1980 return def;
1981 }
1982 }
1983
1984 return NULL;
1985 }
1986
1987 /* Finds the reference corresponding to the definition of REG in INSN.
1988 DF is the dataflow object. */
1989
1990 df_ref
1991 df_find_def (rtx insn, rtx reg)
1992 {
1993 unsigned int uid;
1994 df_ref *def_rec;
1995
1996 if (GET_CODE (reg) == SUBREG)
1997 reg = SUBREG_REG (reg);
1998 gcc_assert (REG_P (reg));
1999
2000 uid = INSN_UID (insn);
2001 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
2002 {
2003 df_ref def = *def_rec;
2004 if (DF_REF_REGNO (def) == REGNO (reg))
2005 return def;
2006 }
2007
2008 return NULL;
2009 }
2010
2011
2012 /* Return true if REG is defined in INSN, zero otherwise. */
2013
2014 bool
2015 df_reg_defined (rtx insn, rtx reg)
2016 {
2017 return df_find_def (insn, reg) != NULL;
2018 }
2019
2020
2021 /* Finds the reference corresponding to the use of REG in INSN.
2022 DF is the dataflow object. */
2023
2024 df_ref
2025 df_find_use (rtx insn, rtx reg)
2026 {
2027 unsigned int uid;
2028 df_ref *use_rec;
2029
2030 if (GET_CODE (reg) == SUBREG)
2031 reg = SUBREG_REG (reg);
2032 gcc_assert (REG_P (reg));
2033
2034 uid = INSN_UID (insn);
2035 for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
2036 {
2037 df_ref use = *use_rec;
2038 if (DF_REF_REGNO (use) == REGNO (reg))
2039 return use;
2040 }
2041 if (df->changeable_flags & DF_EQ_NOTES)
2042 for (use_rec = DF_INSN_UID_EQ_USES (uid); *use_rec; use_rec++)
2043 {
2044 df_ref use = *use_rec;
2045 if (DF_REF_REGNO (use) == REGNO (reg))
2046 return use;
2047 }
2048 return NULL;
2049 }
2050
2051
2052 /* Return true if REG is referenced in INSN, zero otherwise. */
2053
2054 bool
2055 df_reg_used (rtx insn, rtx reg)
2056 {
2057 return df_find_use (insn, reg) != NULL;
2058 }
2059
2060 \f
2061 /*----------------------------------------------------------------------------
2062 Debugging and printing functions.
2063 ----------------------------------------------------------------------------*/
2064
2065 /* Write information about registers and basic blocks into FILE.
2066 This is part of making a debugging dump. */
2067
2068 void
2069 dump_regset (regset r, FILE *outf)
2070 {
2071 unsigned i;
2072 reg_set_iterator rsi;
2073
2074 if (r == NULL)
2075 {
2076 fputs (" (nil)", outf);
2077 return;
2078 }
2079
2080 EXECUTE_IF_SET_IN_REG_SET (r, 0, i, rsi)
2081 {
2082 fprintf (outf, " %d", i);
2083 if (i < FIRST_PSEUDO_REGISTER)
2084 fprintf (outf, " [%s]",
2085 reg_names[i]);
2086 }
2087 }
2088
2089 /* Print a human-readable representation of R on the standard error
2090 stream. This function is designed to be used from within the
2091 debugger. */
2092 extern void debug_regset (regset);
2093 DEBUG_FUNCTION void
2094 debug_regset (regset r)
2095 {
2096 dump_regset (r, stderr);
2097 putc ('\n', stderr);
2098 }
2099
2100 /* Write information about registers and basic blocks into FILE.
2101 This is part of making a debugging dump. */
2102
2103 void
2104 df_print_regset (FILE *file, bitmap r)
2105 {
2106 unsigned int i;
2107 bitmap_iterator bi;
2108
2109 if (r == NULL)
2110 fputs (" (nil)", file);
2111 else
2112 {
2113 EXECUTE_IF_SET_IN_BITMAP (r, 0, i, bi)
2114 {
2115 fprintf (file, " %d", i);
2116 if (i < FIRST_PSEUDO_REGISTER)
2117 fprintf (file, " [%s]", reg_names[i]);
2118 }
2119 }
2120 fprintf (file, "\n");
2121 }
2122
2123
2124 /* Write information about registers and basic blocks into FILE. The
2125 bitmap is in the form used by df_byte_lr. This is part of making a
2126 debugging dump. */
2127
2128 void
2129 df_print_word_regset (FILE *file, bitmap r)
2130 {
2131 unsigned int max_reg = max_reg_num ();
2132
2133 if (r == NULL)
2134 fputs (" (nil)", file);
2135 else
2136 {
2137 unsigned int i;
2138 for (i = FIRST_PSEUDO_REGISTER; i < max_reg; i++)
2139 {
2140 bool found = (bitmap_bit_p (r, 2 * i)
2141 || bitmap_bit_p (r, 2 * i + 1));
2142 if (found)
2143 {
2144 int word;
2145 const char * sep = "";
2146 fprintf (file, " %d", i);
2147 fprintf (file, "(");
2148 for (word = 0; word < 2; word++)
2149 if (bitmap_bit_p (r, 2 * i + word))
2150 {
2151 fprintf (file, "%s%d", sep, word);
2152 sep = ", ";
2153 }
2154 fprintf (file, ")");
2155 }
2156 }
2157 }
2158 fprintf (file, "\n");
2159 }
2160
2161
2162 /* Dump dataflow info. */
2163
2164 void
2165 df_dump (FILE *file)
2166 {
2167 basic_block bb;
2168 df_dump_start (file);
2169
2170 FOR_ALL_BB_FN (bb, cfun)
2171 {
2172 df_print_bb_index (bb, file);
2173 df_dump_top (bb, file);
2174 df_dump_bottom (bb, file);
2175 }
2176
2177 fprintf (file, "\n");
2178 }
2179
2180
2181 /* Dump dataflow info for df->blocks_to_analyze. */
2182
2183 void
2184 df_dump_region (FILE *file)
2185 {
2186 if (df->blocks_to_analyze)
2187 {
2188 bitmap_iterator bi;
2189 unsigned int bb_index;
2190
2191 fprintf (file, "\n\nstarting region dump\n");
2192 df_dump_start (file);
2193
2194 EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
2195 {
2196 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
2197 dump_bb (file, bb, 0, TDF_DETAILS);
2198 }
2199 fprintf (file, "\n");
2200 }
2201 else
2202 df_dump (file);
2203 }
2204
2205
2206 /* Dump the introductory information for each problem defined. */
2207
2208 void
2209 df_dump_start (FILE *file)
2210 {
2211 int i;
2212
2213 if (!df || !file)
2214 return;
2215
2216 fprintf (file, "\n\n%s\n", current_function_name ());
2217 fprintf (file, "\nDataflow summary:\n");
2218 if (df->blocks_to_analyze)
2219 fprintf (file, "def_info->table_size = %d, use_info->table_size = %d\n",
2220 DF_DEFS_TABLE_SIZE (), DF_USES_TABLE_SIZE ());
2221
2222 for (i = 0; i < df->num_problems_defined; i++)
2223 {
2224 struct dataflow *dflow = df->problems_in_order[i];
2225 if (dflow->computed)
2226 {
2227 df_dump_problem_function fun = dflow->problem->dump_start_fun;
2228 if (fun)
2229 fun (file);
2230 }
2231 }
2232 }
2233
2234
2235 /* Dump the top or bottom of the block information for BB. */
2236 static void
2237 df_dump_bb_problem_data (basic_block bb, FILE *file, bool top)
2238 {
2239 int i;
2240
2241 if (!df || !file)
2242 return;
2243
2244 for (i = 0; i < df->num_problems_defined; i++)
2245 {
2246 struct dataflow *dflow = df->problems_in_order[i];
2247 if (dflow->computed)
2248 {
2249 df_dump_bb_problem_function bbfun;
2250
2251 if (top)
2252 bbfun = dflow->problem->dump_top_fun;
2253 else
2254 bbfun = dflow->problem->dump_bottom_fun;
2255
2256 if (bbfun)
2257 bbfun (bb, file);
2258 }
2259 }
2260 }
2261
2262 /* Dump the top of the block information for BB. */
2263
2264 void
2265 df_dump_top (basic_block bb, FILE *file)
2266 {
2267 df_dump_bb_problem_data (bb, file, /*top=*/true);
2268 }
2269
2270 /* Dump the bottom of the block information for BB. */
2271
2272 void
2273 df_dump_bottom (basic_block bb, FILE *file)
2274 {
2275 df_dump_bb_problem_data (bb, file, /*top=*/false);
2276 }
2277
2278
2279 /* Dump information about INSN just before or after dumping INSN itself. */
2280 static void
2281 df_dump_insn_problem_data (const_rtx insn, FILE *file, bool top)
2282 {
2283 int i;
2284
2285 if (!df || !file)
2286 return;
2287
2288 for (i = 0; i < df->num_problems_defined; i++)
2289 {
2290 struct dataflow *dflow = df->problems_in_order[i];
2291 if (dflow->computed)
2292 {
2293 df_dump_insn_problem_function insnfun;
2294
2295 if (top)
2296 insnfun = dflow->problem->dump_insn_top_fun;
2297 else
2298 insnfun = dflow->problem->dump_insn_bottom_fun;
2299
2300 if (insnfun)
2301 insnfun (insn, file);
2302 }
2303 }
2304 }
2305
2306 /* Dump information about INSN before dumping INSN itself. */
2307
2308 void
2309 df_dump_insn_top (const_rtx insn, FILE *file)
2310 {
2311 df_dump_insn_problem_data (insn, file, /*top=*/true);
2312 }
2313
2314 /* Dump information about INSN after dumping INSN itself. */
2315
2316 void
2317 df_dump_insn_bottom (const_rtx insn, FILE *file)
2318 {
2319 df_dump_insn_problem_data (insn, file, /*top=*/false);
2320 }
2321
2322
2323 static void
2324 df_ref_dump (df_ref ref, FILE *file)
2325 {
2326 fprintf (file, "%c%d(%d)",
2327 DF_REF_REG_DEF_P (ref)
2328 ? 'd'
2329 : (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE) ? 'e' : 'u',
2330 DF_REF_ID (ref),
2331 DF_REF_REGNO (ref));
2332 }
2333
2334 void
2335 df_refs_chain_dump (df_ref *ref_rec, bool follow_chain, FILE *file)
2336 {
2337 fprintf (file, "{ ");
2338 while (*ref_rec)
2339 {
2340 df_ref ref = *ref_rec;
2341 df_ref_dump (ref, file);
2342 if (follow_chain)
2343 df_chain_dump (DF_REF_CHAIN (ref), file);
2344 ref_rec++;
2345 }
2346 fprintf (file, "}");
2347 }
2348
2349
2350 /* Dump either a ref-def or reg-use chain. */
2351
2352 void
2353 df_regs_chain_dump (df_ref ref, FILE *file)
2354 {
2355 fprintf (file, "{ ");
2356 while (ref)
2357 {
2358 df_ref_dump (ref, file);
2359 ref = DF_REF_NEXT_REG (ref);
2360 }
2361 fprintf (file, "}");
2362 }
2363
2364
2365 static void
2366 df_mws_dump (struct df_mw_hardreg **mws, FILE *file)
2367 {
2368 while (*mws)
2369 {
2370 fprintf (file, "mw %c r[%d..%d]\n",
2371 (DF_MWS_REG_DEF_P (*mws)) ? 'd' : 'u',
2372 (*mws)->start_regno, (*mws)->end_regno);
2373 mws++;
2374 }
2375 }
2376
2377
2378 static void
2379 df_insn_uid_debug (unsigned int uid,
2380 bool follow_chain, FILE *file)
2381 {
2382 fprintf (file, "insn %d luid %d",
2383 uid, DF_INSN_UID_LUID (uid));
2384
2385 if (DF_INSN_UID_DEFS (uid))
2386 {
2387 fprintf (file, " defs ");
2388 df_refs_chain_dump (DF_INSN_UID_DEFS (uid), follow_chain, file);
2389 }
2390
2391 if (DF_INSN_UID_USES (uid))
2392 {
2393 fprintf (file, " uses ");
2394 df_refs_chain_dump (DF_INSN_UID_USES (uid), follow_chain, file);
2395 }
2396
2397 if (DF_INSN_UID_EQ_USES (uid))
2398 {
2399 fprintf (file, " eq uses ");
2400 df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid), follow_chain, file);
2401 }
2402
2403 if (DF_INSN_UID_MWS (uid))
2404 {
2405 fprintf (file, " mws ");
2406 df_mws_dump (DF_INSN_UID_MWS (uid), file);
2407 }
2408 fprintf (file, "\n");
2409 }
2410
2411
2412 DEBUG_FUNCTION void
2413 df_insn_debug (rtx insn, bool follow_chain, FILE *file)
2414 {
2415 df_insn_uid_debug (INSN_UID (insn), follow_chain, file);
2416 }
2417
2418 DEBUG_FUNCTION void
2419 df_insn_debug_regno (rtx insn, FILE *file)
2420 {
2421 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2422
2423 fprintf (file, "insn %d bb %d luid %d defs ",
2424 INSN_UID (insn), BLOCK_FOR_INSN (insn)->index,
2425 DF_INSN_INFO_LUID (insn_info));
2426 df_refs_chain_dump (DF_INSN_INFO_DEFS (insn_info), false, file);
2427
2428 fprintf (file, " uses ");
2429 df_refs_chain_dump (DF_INSN_INFO_USES (insn_info), false, file);
2430
2431 fprintf (file, " eq_uses ");
2432 df_refs_chain_dump (DF_INSN_INFO_EQ_USES (insn_info), false, file);
2433 fprintf (file, "\n");
2434 }
2435
2436 DEBUG_FUNCTION void
2437 df_regno_debug (unsigned int regno, FILE *file)
2438 {
2439 fprintf (file, "reg %d defs ", regno);
2440 df_regs_chain_dump (DF_REG_DEF_CHAIN (regno), file);
2441 fprintf (file, " uses ");
2442 df_regs_chain_dump (DF_REG_USE_CHAIN (regno), file);
2443 fprintf (file, " eq_uses ");
2444 df_regs_chain_dump (DF_REG_EQ_USE_CHAIN (regno), file);
2445 fprintf (file, "\n");
2446 }
2447
2448
2449 DEBUG_FUNCTION void
2450 df_ref_debug (df_ref ref, FILE *file)
2451 {
2452 fprintf (file, "%c%d ",
2453 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
2454 DF_REF_ID (ref));
2455 fprintf (file, "reg %d bb %d insn %d flag %#x type %#x ",
2456 DF_REF_REGNO (ref),
2457 DF_REF_BBNO (ref),
2458 DF_REF_IS_ARTIFICIAL (ref) ? -1 : DF_REF_INSN_UID (ref),
2459 DF_REF_FLAGS (ref),
2460 DF_REF_TYPE (ref));
2461 if (DF_REF_LOC (ref))
2462 {
2463 if (flag_dump_noaddr)
2464 fprintf (file, "loc #(#) chain ");
2465 else
2466 fprintf (file, "loc %p(%p) chain ", (void *)DF_REF_LOC (ref),
2467 (void *)*DF_REF_LOC (ref));
2468 }
2469 else
2470 fprintf (file, "chain ");
2471 df_chain_dump (DF_REF_CHAIN (ref), file);
2472 fprintf (file, "\n");
2473 }
2474 \f
2475 /* Functions for debugging from GDB. */
2476
2477 DEBUG_FUNCTION void
2478 debug_df_insn (rtx insn)
2479 {
2480 df_insn_debug (insn, true, stderr);
2481 debug_rtx (insn);
2482 }
2483
2484
2485 DEBUG_FUNCTION void
2486 debug_df_reg (rtx reg)
2487 {
2488 df_regno_debug (REGNO (reg), stderr);
2489 }
2490
2491
2492 DEBUG_FUNCTION void
2493 debug_df_regno (unsigned int regno)
2494 {
2495 df_regno_debug (regno, stderr);
2496 }
2497
2498
2499 DEBUG_FUNCTION void
2500 debug_df_ref (df_ref ref)
2501 {
2502 df_ref_debug (ref, stderr);
2503 }
2504
2505
2506 DEBUG_FUNCTION void
2507 debug_df_defno (unsigned int defno)
2508 {
2509 df_ref_debug (DF_DEFS_GET (defno), stderr);
2510 }
2511
2512
2513 DEBUG_FUNCTION void
2514 debug_df_useno (unsigned int defno)
2515 {
2516 df_ref_debug (DF_USES_GET (defno), stderr);
2517 }
2518
2519
2520 DEBUG_FUNCTION void
2521 debug_df_chain (struct df_link *link)
2522 {
2523 df_chain_dump (link, stderr);
2524 fputc ('\n', stderr);
2525 }