1 /* Dataflow support routines.
2 Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
3 Contributed by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz,
4 mhayes@redhat.com)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
21 02111-1307, USA.
22
23
24 OVERVIEW:
25
26 This file provides some dataflow routines for computing reaching defs,
27 upward exposed uses, live variables, def-use chains, and use-def
28 chains. The global dataflow is performed using simple iterative
29 methods with a worklist and could be sped up by visiting the blocks
30 in depth-first search order.
31
32 A `struct ref' data structure (ref) is allocated for every register
33 reference (def or use) and this records the insn and bb the ref is
34 found within. The refs are linked together in chains of uses and defs
35 for each insn and for each register. Each ref also has a chain field
36 that links all the use refs for a def or all the def refs for a use.
37 This is used to create use-def or def-use chains.
38
39
40 USAGE:
41
42 Here's an example of using the dataflow routines.
43
44 struct df *df;
45
46 df = df_init ();
47
48 df_analyse (df, 0, DF_ALL);
49
50 df_dump (df, DF_ALL, stderr);
51
52 df_finish (df);
53
54
55 df_init simply creates a poor man's object (df) that needs to be
56 passed to all the dataflow routines. df_finish destroys this
57 object and frees up any allocated memory.
58
59 df_analyse performs the following:
60
61 1. Records defs and uses by scanning the insns in each basic block
62 or by scanning the insns queued by df_insn_modify.
63 2. Links defs and uses into insn-def and insn-use chains.
64 3. Links defs and uses into reg-def and reg-use chains.
65 4. Assigns LUIDs to each insn (for modified blocks).
66 5. Calculates local reaching definitions.
67 6. Calculates global reaching definitions.
68 7. Creates use-def chains.
69 8. Calculates local reaching uses (upwards exposed uses).
70 9. Calculates global reaching uses.
71 10. Creates def-use chains.
72 11. Calculates local live registers.
73 12. Calculates global live registers.
74 13. Calculates register lifetimes and determines local registers.
75
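   For example, a pass that only needs use-def chains might request just
   those flags and then walk each use back to its reaching defs.  This is
   only a sketch (the flags a pass really wants depend on what it does
   with the information, and USE below stands for a use ref the pass has
   picked out of an insn-use chain):

      struct df_link *link;

      df = df_init ();
      df_analyse (df, 0, DF_UD_CHAIN | DF_HARD_REGS);

      for (link = DF_REF_CHAIN (use); link; link = link->next)
        {
          struct ref *def = link->ref;
          rtx def_insn = DF_REF_INSN (def);
          ...
        }

      df_finish (df);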
76
77 PHILOSOPHY:
78
79 Note that the dataflow information is not updated for every newly
80 deleted or created insn. If the dataflow information requires
81 updating, then all the changed, new, or deleted insns need to be
82 marked with df_insn_modify (or df_insns_modify) either directly or
83 indirectly (say through calling df_insn_delete). df_insn_modify
84 marks all the modified insns to get processed the next time df_analyse
85 is called.
86
87 Beware that tinkering with insns may invalidate the dataflow information.
88 The philosophy behind these routines is that once the dataflow
89 information has been gathered, the user should store what they require
90 before they tinker with any insn. Once a reg is replaced, for example,
91 the reg-def/reg-use chains will point to the wrong place. Once a
92 whole lot of changes have been made, df_analyse can be called again
93 to update the dataflow information. Currently, this is not very smart
94 with regard to propagating changes to the dataflow so it should not
95 be called very often.
96
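   A typical update cycle therefore looks roughly like this (only a
   sketch; see df.h for the exact prototypes):

      ... emit, delete, or rewrite insns, calling for each one ...
      df_insn_modify (df, bb, insn);

      ... and once the whole batch of changes is finished ...
      df_analyse (df, 0, DF_ALL);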
97
98 DATA STRUCTURES:
99
100 The basic object is a REF (reference) and this may either be a DEF
101 (definition) or a USE of a register.
102
103 These are linked into a variety of lists; namely reg-def, reg-use,
104 insn-def, insn-use, def-use, and use-def lists. For example,
105 the reg-def lists contain all the refs that define a given register
106 while the insn-use lists contain all the refs used by an insn.
107
108 Note that the reg-def and reg-use chains are generally short (except for the
109 hard registers) and thus it is much faster to search these chains
110 than to search the def or use bitmaps.
111
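   For example, all the defs of register REGNO can be visited by walking
   its reg-def chain instead of scanning the def bitmaps (a minimal
   sketch; the chain head lives in df->regs[REGNO].defs):

      struct df_link *link;

      for (link = df->regs[regno].defs; link; link = link->next)
        {
          struct ref *def = link->ref;
          rtx def_insn = DF_REF_INSN (def);
          ...
        }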
112 If the insns are in SSA form then the reg-def and use-def lists
113 should only contain the single defining ref.
114
115 TODO:
116
117 1) Incremental dataflow analysis.
118
119 Note that if a loop invariant insn is hoisted (or sunk), we do not
120 need to change the def-use or use-def chains. All we have to do is to
121 change the bb field for all the associated defs and uses and to
122 renumber the LUIDs for the original and new basic blocks of the insn.
123
124 When shadowing loop mems we create new uses and defs for new pseudos
125 so we do not affect the existing dataflow information.
126
127 My current strategy is to queue up all modified, created, or deleted
128 insns so when df_analyse is called we can easily determine all the new
129 or deleted refs. Currently the global dataflow information is
130 recomputed from scratch but this could be propagated more efficiently.
131
132 2) Improved global data flow computation using depth first search.
133
134 3) Reduced memory requirements.
135
136 We could operate a pool of ref structures. When a ref is deleted it
137 gets returned to the pool (say by linking on to a chain of free refs).
138 This will require a pair of bitmaps for defs and uses so that we can
139 tell which ones have been changed. Alternatively, we could
140 periodically squeeze the def and use tables and associated bitmaps and
141 renumber the def and use ids.
142
143 4) Ordering of reg-def and reg-use lists.
144
145 Should the first entry in the def list be the first def (within a BB)?
146 Similarly, should the first entry in the use list be the last use
147 (within a BB)?
148
149 5) Working with a sub-CFG.
150
151 Often the whole CFG does not need to be analysed; for example,
152 when optimising a loop, only certain registers are of interest.
153 Perhaps there should be a bitmap argument to df_analyse to specify
154 which registers should be analysed? */
155
156 #include "config.h"
157 #include "system.h"
158 #include "coretypes.h"
159 #include "tm.h"
160 #include "rtl.h"
161 #include "tm_p.h"
162 #include "insn-config.h"
163 #include "recog.h"
164 #include "function.h"
165 #include "regs.h"
166 #include "obstack.h"
167 #include "hard-reg-set.h"
168 #include "basic-block.h"
169 #include "sbitmap.h"
170 #include "bitmap.h"
171 #include "df.h"
172 #include "fibheap.h"
173
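/* Iterate over the basic blocks whose indices are set in BITMAP,
   starting from index MIN, binding BB to each block in turn and
   executing CODE for it.  A representative use, copied from
   df_refs_record below:

      FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
        {
          df_bb_refs_record (df, bb);
        });  */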
174 #define FOR_EACH_BB_IN_BITMAP(BITMAP, MIN, BB, CODE) \
175 do \
176 { \
177 unsigned int node_; \
178 EXECUTE_IF_SET_IN_BITMAP (BITMAP, MIN, node_, \
179 {(BB) = BASIC_BLOCK (node_); CODE;}); \
180 } \
181 while (0)
182
183 static struct obstack df_ref_obstack;
184 static struct df *ddf;
185
186 static void df_reg_table_realloc PARAMS((struct df *, int));
187 #if 0
188 static void df_def_table_realloc PARAMS((struct df *, int));
189 #endif
190 static void df_insn_table_realloc PARAMS((struct df *, unsigned int));
191 static void df_bitmaps_alloc PARAMS((struct df *, int));
192 static void df_bitmaps_free PARAMS((struct df *, int));
193 static void df_free PARAMS((struct df *));
194 static void df_alloc PARAMS((struct df *, int));
195
196 static rtx df_reg_clobber_gen PARAMS((unsigned int));
197 static rtx df_reg_use_gen PARAMS((unsigned int));
198
199 static inline struct df_link *df_link_create PARAMS((struct ref *,
200 struct df_link *));
201 static struct df_link *df_ref_unlink PARAMS((struct df_link **, struct ref *));
202 static void df_def_unlink PARAMS((struct df *, struct ref *));
203 static void df_use_unlink PARAMS((struct df *, struct ref *));
204 static void df_insn_refs_unlink PARAMS ((struct df *, basic_block, rtx));
205 #if 0
206 static void df_bb_refs_unlink PARAMS ((struct df *, basic_block));
207 static void df_refs_unlink PARAMS ((struct df *, bitmap));
208 #endif
209
210 static struct ref *df_ref_create PARAMS((struct df *,
211 rtx, rtx *, rtx,
212 enum df_ref_type, enum df_ref_flags));
213 static void df_ref_record_1 PARAMS((struct df *, rtx, rtx *,
214 rtx, enum df_ref_type,
215 enum df_ref_flags));
216 static void df_ref_record PARAMS((struct df *, rtx, rtx *,
217 rtx, enum df_ref_type,
218 enum df_ref_flags));
219 static void df_def_record_1 PARAMS((struct df *, rtx, basic_block, rtx));
220 static void df_defs_record PARAMS((struct df *, rtx, basic_block, rtx));
221 static void df_uses_record PARAMS((struct df *, rtx *,
222 enum df_ref_type, basic_block, rtx,
223 enum df_ref_flags));
224 static void df_insn_refs_record PARAMS((struct df *, basic_block, rtx));
225 static void df_bb_refs_record PARAMS((struct df *, basic_block));
226 static void df_refs_record PARAMS((struct df *, bitmap));
227
228 static void df_bb_reg_def_chain_create PARAMS((struct df *, basic_block));
229 static void df_reg_def_chain_create PARAMS((struct df *, bitmap));
230 static void df_bb_reg_use_chain_create PARAMS((struct df *, basic_block));
231 static void df_reg_use_chain_create PARAMS((struct df *, bitmap));
232 static void df_bb_du_chain_create PARAMS((struct df *, basic_block, bitmap));
233 static void df_du_chain_create PARAMS((struct df *, bitmap));
234 static void df_bb_ud_chain_create PARAMS((struct df *, basic_block));
235 static void df_ud_chain_create PARAMS((struct df *, bitmap));
236 static void df_bb_rd_local_compute PARAMS((struct df *, basic_block));
237 static void df_rd_local_compute PARAMS((struct df *, bitmap));
238 static void df_bb_ru_local_compute PARAMS((struct df *, basic_block));
239 static void df_ru_local_compute PARAMS((struct df *, bitmap));
240 static void df_bb_lr_local_compute PARAMS((struct df *, basic_block));
241 static void df_lr_local_compute PARAMS((struct df *, bitmap));
242 static void df_bb_reg_info_compute PARAMS((struct df *, basic_block, bitmap));
243 static void df_reg_info_compute PARAMS((struct df *, bitmap));
244
245 static int df_bb_luids_set PARAMS((struct df *df, basic_block));
246 static int df_luids_set PARAMS((struct df *df, bitmap));
247
248 static int df_modified_p PARAMS ((struct df *, bitmap));
249 static int df_refs_queue PARAMS ((struct df *));
250 static int df_refs_process PARAMS ((struct df *));
251 static int df_bb_refs_update PARAMS ((struct df *, basic_block));
252 static int df_refs_update PARAMS ((struct df *));
253 static void df_analyse_1 PARAMS((struct df *, bitmap, int, int));
254
255 static void df_insns_modify PARAMS((struct df *, basic_block,
256 rtx, rtx));
257 static int df_rtx_mem_replace PARAMS ((rtx *, void *));
258 static int df_rtx_reg_replace PARAMS ((rtx *, void *));
259 void df_refs_reg_replace PARAMS ((struct df *, bitmap,
260 struct df_link *, rtx, rtx));
261
262 static int df_def_dominates_all_uses_p PARAMS((struct df *, struct ref *def));
263 static int df_def_dominates_uses_p PARAMS((struct df *,
264 struct ref *def, bitmap));
265 static struct ref *df_bb_regno_last_use_find PARAMS((struct df *, basic_block,
266 unsigned int));
267 static struct ref *df_bb_regno_first_def_find PARAMS((struct df *, basic_block,
268 unsigned int));
269 static struct ref *df_bb_insn_regno_last_use_find PARAMS((struct df *,
270 basic_block,
271 rtx, unsigned int));
272 static struct ref *df_bb_insn_regno_first_def_find PARAMS((struct df *,
273 basic_block,
274 rtx, unsigned int));
275
276 static void df_chain_dump PARAMS((struct df_link *, FILE *file));
277 static void df_chain_dump_regno PARAMS((struct df_link *, FILE *file));
278 static void df_regno_debug PARAMS ((struct df *, unsigned int, FILE *));
279 static void df_ref_debug PARAMS ((struct df *, struct ref *, FILE *));
280 static void df_rd_transfer_function PARAMS ((int, int *, bitmap, bitmap,
281 bitmap, bitmap, void *));
282 static void df_ru_transfer_function PARAMS ((int, int *, bitmap, bitmap,
283 bitmap, bitmap, void *));
284 static void df_lr_transfer_function PARAMS ((int, int *, bitmap, bitmap,
285 bitmap, bitmap, void *));
286 static void hybrid_search_bitmap PARAMS ((basic_block, bitmap *, bitmap *,
287 bitmap *, bitmap *, enum df_flow_dir,
288 enum df_confluence_op,
289 transfer_function_bitmap,
290 sbitmap, sbitmap, void *));
291 static void hybrid_search_sbitmap PARAMS ((basic_block, sbitmap *, sbitmap *,
292 sbitmap *, sbitmap *, enum df_flow_dir,
293 enum df_confluence_op,
294 transfer_function_sbitmap,
295 sbitmap, sbitmap, void *));
296 static inline bool read_modify_subreg_p PARAMS ((rtx));
297
298 \f
299 /* Local memory allocation/deallocation routines. */
300
301
302 /* Increase the insn info table to have space for at least SIZE + 1
303 elements. */
304 static void
305 df_insn_table_realloc (df, size)
306 struct df *df;
307 unsigned int size;
308 {
309 size++;
310 if (size <= df->insn_size)
311 return;
312
313 /* Make the table a little larger than requested, so we don't need
314 to enlarge it so often. */
315 size += df->insn_size / 4;
316
317 df->insns = (struct insn_info *)
318 xrealloc (df->insns, size * sizeof (struct insn_info));
319
320 memset (df->insns + df->insn_size, 0,
321 (size - df->insn_size) * sizeof (struct insn_info));
322
323 df->insn_size = size;
324
325 if (! df->insns_modified)
326 {
327 df->insns_modified = BITMAP_XMALLOC ();
328 bitmap_zero (df->insns_modified);
329 }
330 }
331
332
333 /* Increase the reg info table by SIZE more elements. */
334 static void
335 df_reg_table_realloc (df, size)
336 struct df *df;
337 int size;
338 {
339 /* Make table 25 percent larger by default. */
340 if (! size)
341 size = df->reg_size / 4;
342
343 size += df->reg_size;
344 if (size < max_reg_num ())
345 size = max_reg_num ();
346
347 df->regs = (struct reg_info *)
348 xrealloc (df->regs, size * sizeof (struct reg_info));
349
350 /* Zero the new entries. */
351 memset (df->regs + df->reg_size, 0,
352 (size - df->reg_size) * sizeof (struct reg_info));
353
354 df->reg_size = size;
355 }
356
357
358 #if 0
359 /* Not currently used. */
360 static void
361 df_def_table_realloc (df, size)
362 struct df *df;
363 int size;
364 {
365 int i;
366 struct ref *refs;
367
368 /* Make table 25 percent larger by default. */
369 if (! size)
370 size = df->def_size / 4;
371
372 df->def_size += size;
373 df->defs = xrealloc (df->defs,
374 df->def_size * sizeof (*df->defs));
375
376 /* Allocate a new block of memory and link into list of blocks
377 that will need to be freed later. */
378
379 refs = xmalloc (size * sizeof (*refs));
380
381 /* Link all the new refs together, overloading the chain field. */
382 for (i = 0; i < size - 1; i++)
383 refs[i].chain = (struct df_link *) (refs + i + 1);
384 refs[size - 1].chain = 0;
385 }
386 #endif
387
388
389
390 /* Allocate bitmaps for each basic block. */
391 static void
392 df_bitmaps_alloc (df, flags)
393 struct df *df;
394 int flags;
395 {
396 int dflags = 0;
397 basic_block bb;
398
399 /* Free the bitmaps if they need resizing. */
400 if ((flags & DF_LR) && df->n_regs < (unsigned int) max_reg_num ())
401 dflags |= DF_LR | DF_RU;
402 if ((flags & DF_RU) && df->n_uses < df->use_id)
403 dflags |= DF_RU;
404 if ((flags & DF_RD) && df->n_defs < df->def_id)
405 dflags |= DF_RD;
406
407 if (dflags)
408 df_bitmaps_free (df, dflags);
409
410 df->n_defs = df->def_id;
411 df->n_uses = df->use_id;
412
413 FOR_EACH_BB (bb)
414 {
415 struct bb_info *bb_info = DF_BB_INFO (df, bb);
416
417 if (flags & DF_RD && ! bb_info->rd_in)
418 {
419 /* Allocate bitmaps for reaching definitions. */
420 bb_info->rd_kill = BITMAP_XMALLOC ();
421 bitmap_zero (bb_info->rd_kill);
422 bb_info->rd_gen = BITMAP_XMALLOC ();
423 bitmap_zero (bb_info->rd_gen);
424 bb_info->rd_in = BITMAP_XMALLOC ();
425 bb_info->rd_out = BITMAP_XMALLOC ();
426 bb_info->rd_valid = 0;
427 }
428
429 if (flags & DF_RU && ! bb_info->ru_in)
430 {
431 /* Allocate bitmaps for upward exposed uses. */
432 bb_info->ru_kill = BITMAP_XMALLOC ();
433 bitmap_zero (bb_info->ru_kill);
434 /* Note the lack of symmetry. */
435 bb_info->ru_gen = BITMAP_XMALLOC ();
436 bitmap_zero (bb_info->ru_gen);
437 bb_info->ru_in = BITMAP_XMALLOC ();
438 bb_info->ru_out = BITMAP_XMALLOC ();
439 bb_info->ru_valid = 0;
440 }
441
442 if (flags & DF_LR && ! bb_info->lr_in)
443 {
444 /* Allocate bitmaps for live variables. */
445 bb_info->lr_def = BITMAP_XMALLOC ();
446 bitmap_zero (bb_info->lr_def);
447 bb_info->lr_use = BITMAP_XMALLOC ();
448 bitmap_zero (bb_info->lr_use);
449 bb_info->lr_in = BITMAP_XMALLOC ();
450 bb_info->lr_out = BITMAP_XMALLOC ();
451 bb_info->lr_valid = 0;
452 }
453 }
454 }
455
456
457 /* Free bitmaps for each basic block. */
458 static void
459 df_bitmaps_free (df, flags)
460 struct df *df ATTRIBUTE_UNUSED;
461 int flags;
462 {
463 basic_block bb;
464
465 FOR_EACH_BB (bb)
466 {
467 struct bb_info *bb_info = DF_BB_INFO (df, bb);
468
469 if (!bb_info)
470 continue;
471
472 if ((flags & DF_RD) && bb_info->rd_in)
473 {
474 /* Free bitmaps for reaching definitions. */
475 BITMAP_XFREE (bb_info->rd_kill);
476 bb_info->rd_kill = NULL;
477 BITMAP_XFREE (bb_info->rd_gen);
478 bb_info->rd_gen = NULL;
479 BITMAP_XFREE (bb_info->rd_in);
480 bb_info->rd_in = NULL;
481 BITMAP_XFREE (bb_info->rd_out);
482 bb_info->rd_out = NULL;
483 }
484
485 if ((flags & DF_RU) && bb_info->ru_in)
486 {
487 /* Free bitmaps for upward exposed uses. */
488 BITMAP_XFREE (bb_info->ru_kill);
489 bb_info->ru_kill = NULL;
490 BITMAP_XFREE (bb_info->ru_gen);
491 bb_info->ru_gen = NULL;
492 BITMAP_XFREE (bb_info->ru_in);
493 bb_info->ru_in = NULL;
494 BITMAP_XFREE (bb_info->ru_out);
495 bb_info->ru_out = NULL;
496 }
497
498 if ((flags & DF_LR) && bb_info->lr_in)
499 {
500 /* Free bitmaps for live variables. */
501 BITMAP_XFREE (bb_info->lr_def);
502 bb_info->lr_def = NULL;
503 BITMAP_XFREE (bb_info->lr_use);
504 bb_info->lr_use = NULL;
505 BITMAP_XFREE (bb_info->lr_in);
506 bb_info->lr_in = NULL;
507 BITMAP_XFREE (bb_info->lr_out);
508 bb_info->lr_out = NULL;
509 }
510 }
511 df->flags &= ~(flags & (DF_RD | DF_RU | DF_LR));
512 }
513
514
515 /* Allocate and initialize dataflow memory. */
516 static void
517 df_alloc (df, n_regs)
518 struct df *df;
519 int n_regs;
520 {
521 int n_insns;
522 basic_block bb;
523
524 gcc_obstack_init (&df_ref_obstack);
525
526 /* Perhaps we should use LUIDs to save memory for the insn_refs
527 table. This is only a small saving; a few pointers. */
528 n_insns = get_max_uid () + 1;
529
530 df->def_id = 0;
531 df->n_defs = 0;
532 /* Approximate number of defs by number of insns. */
533 df->def_size = n_insns;
534 df->defs = xmalloc (df->def_size * sizeof (*df->defs));
535
536 df->use_id = 0;
537 df->n_uses = 0;
538 /* Approximate number of uses by twice number of insns. */
539 df->use_size = n_insns * 2;
540 df->uses = xmalloc (df->use_size * sizeof (*df->uses));
541
542 df->n_regs = n_regs;
543 df->n_bbs = last_basic_block;
544
545 /* Allocate temporary working array used during local dataflow analysis. */
546 df->reg_def_last = xmalloc (df->n_regs * sizeof (struct ref *));
547
548 df_insn_table_realloc (df, n_insns);
549
550 df_reg_table_realloc (df, df->n_regs);
551
552 df->bbs_modified = BITMAP_XMALLOC ();
553 bitmap_zero (df->bbs_modified);
554
555 df->flags = 0;
556
557 df->bbs = xcalloc (last_basic_block, sizeof (struct bb_info));
558
559 df->all_blocks = BITMAP_XMALLOC ();
560 FOR_EACH_BB (bb)
561 bitmap_set_bit (df->all_blocks, bb->index);
562 }
563
564
565 /* Free all the dataflow info. */
566 static void
567 df_free (df)
568 struct df *df;
569 {
570 df_bitmaps_free (df, DF_ALL);
571
572 if (df->bbs)
573 free (df->bbs);
574 df->bbs = 0;
575
576 if (df->insns)
577 free (df->insns);
578 df->insns = 0;
579 df->insn_size = 0;
580
581 if (df->defs)
582 free (df->defs);
583 df->defs = 0;
584 df->def_size = 0;
585 df->def_id = 0;
586
587 if (df->uses)
588 free (df->uses);
589 df->uses = 0;
590 df->use_size = 0;
591 df->use_id = 0;
592
593 if (df->regs)
594 free (df->regs);
595 df->regs = 0;
596 df->reg_size = 0;
597
598 if (df->bbs_modified)
599 BITMAP_XFREE (df->bbs_modified);
600 df->bbs_modified = 0;
601
602 if (df->insns_modified)
603 BITMAP_XFREE (df->insns_modified);
604 df->insns_modified = 0;
605
606 BITMAP_XFREE (df->all_blocks);
607 df->all_blocks = 0;
608
609 obstack_free (&df_ref_obstack, NULL);
610 }
611 \f
612 /* Local miscellaneous routines. */
613
614 /* Return a USE for register REGNO. */
615 static rtx df_reg_use_gen (regno)
616 unsigned int regno;
617 {
618 rtx reg;
619 rtx use;
620
621 reg = regno_reg_rtx[regno];
622
623 use = gen_rtx_USE (GET_MODE (reg), reg);
624 return use;
625 }
626
627
628 /* Return a CLOBBER for register REGNO. */
629 static rtx df_reg_clobber_gen (regno)
630 unsigned int regno;
631 {
632 rtx reg;
633 rtx use;
634
635 reg = regno_reg_rtx[regno];
636
637 use = gen_rtx_CLOBBER (GET_MODE (reg), reg);
638 return use;
639 }
640 \f
641 /* Local chain manipulation routines. */
642
643 /* Create a link in a def-use or use-def chain. */
644 static inline struct df_link *
645 df_link_create (ref, next)
646 struct ref *ref;
647 struct df_link *next;
648 {
649 struct df_link *link;
650
651 link = (struct df_link *) obstack_alloc (&df_ref_obstack,
652 sizeof (*link));
653 link->next = next;
654 link->ref = ref;
655 return link;
656 }
657
658
659 /* Unlink REF from the chain whose head is pointed to by PHEAD. */
660 static struct df_link *
661 df_ref_unlink (phead, ref)
662 struct df_link **phead;
663 struct ref *ref;
664 {
665 struct df_link *link = *phead;
666
667 if (link)
668 {
669 if (! link->next)
670 {
671 /* Only a single ref. It must be the one we want.
672 If not, the def-use and use-def chains are likely to
673 be inconsistent. */
674 if (link->ref != ref)
675 abort ();
676 /* Now have an empty chain. */
677 *phead = NULL;
678 }
679 else
680 {
681 /* Multiple refs. One of them must be us. */
682 if (link->ref == ref)
683 *phead = link->next;
684 else
685 {
686 /* Follow chain. */
687 for (; link->next; link = link->next)
688 {
689 if (link->next->ref == ref)
690 {
691 /* Unlink from list. */
692 link->next = link->next->next;
693 return link->next;
694 }
695 }
696 }
697 }
698 }
699 return link;
700 }
701
702
703 /* Unlink REF from all def-use/use-def chains, etc. */
704 int
705 df_ref_remove (df, ref)
706 struct df *df;
707 struct ref *ref;
708 {
709 if (DF_REF_REG_DEF_P (ref))
710 {
711 df_def_unlink (df, ref);
712 df_ref_unlink (&df->insns[DF_REF_INSN_UID (ref)].defs, ref);
713 }
714 else
715 {
716 df_use_unlink (df, ref);
717 df_ref_unlink (&df->insns[DF_REF_INSN_UID (ref)].uses, ref);
718 }
719 return 1;
720 }
721
722
723 /* Unlink DEF from use-def and reg-def chains. */
724 static void
725 df_def_unlink (df, def)
726 struct df *df ATTRIBUTE_UNUSED;
727 struct ref *def;
728 {
729 struct df_link *du_link;
730 unsigned int dregno = DF_REF_REGNO (def);
731
732 /* Follow def-use chain to find all the uses of this def. */
733 for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next)
734 {
735 struct ref *use = du_link->ref;
736
737 /* Unlink this def from the use-def chain. */
738 df_ref_unlink (&DF_REF_CHAIN (use), def);
739 }
740 DF_REF_CHAIN (def) = 0;
741
742 /* Unlink def from reg-def chain. */
743 df_ref_unlink (&df->regs[dregno].defs, def);
744
745 df->defs[DF_REF_ID (def)] = 0;
746 }
747
748
749 /* Unlink use from def-use and reg-use chains. */
750 static void
751 df_use_unlink (df, use)
752 struct df *df ATTRIBUTE_UNUSED;
753 struct ref *use;
754 {
755 struct df_link *ud_link;
756 unsigned int uregno = DF_REF_REGNO (use);
757
758 /* Follow use-def chain to find all the defs of this use. */
759 for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next)
760 {
761 struct ref *def = ud_link->ref;
762
763 /* Unlink this use from the def-use chain. */
764 df_ref_unlink (&DF_REF_CHAIN (def), use);
765 }
766 DF_REF_CHAIN (use) = 0;
767
768 /* Unlink use from reg-use chain. */
769 df_ref_unlink (&df->regs[uregno].uses, use);
770
771 df->uses[DF_REF_ID (use)] = 0;
772 }
773 \f
774 /* Local routines for recording refs. */
775
776
777 /* Create a new ref of type DF_REF_TYPE for register REG at address
778 LOC within INSN of BB. */
779 static struct ref *
780 df_ref_create (df, reg, loc, insn, ref_type, ref_flags)
781 struct df *df;
782 rtx reg;
783 rtx *loc;
784 rtx insn;
785 enum df_ref_type ref_type;
786 enum df_ref_flags ref_flags;
787 {
788 struct ref *this_ref;
789
790 this_ref = (struct ref *) obstack_alloc (&df_ref_obstack,
791 sizeof (*this_ref));
792 DF_REF_REG (this_ref) = reg;
793 DF_REF_LOC (this_ref) = loc;
794 DF_REF_INSN (this_ref) = insn;
795 DF_REF_CHAIN (this_ref) = 0;
796 DF_REF_TYPE (this_ref) = ref_type;
797 DF_REF_FLAGS (this_ref) = ref_flags;
798
799 if (ref_type == DF_REF_REG_DEF)
800 {
801 if (df->def_id >= df->def_size)
802 {
803 /* Make table 25 percent larger. */
804 df->def_size += (df->def_size / 4);
805 df->defs = xrealloc (df->defs,
806 df->def_size * sizeof (*df->defs));
807 }
808 DF_REF_ID (this_ref) = df->def_id;
809 df->defs[df->def_id++] = this_ref;
810 }
811 else
812 {
813 if (df->use_id >= df->use_size)
814 {
815 /* Make table 25 percent larger. */
816 df->use_size += (df->use_size / 4);
817 df->uses = xrealloc (df->uses,
818 df->use_size * sizeof (*df->uses));
819 }
820 DF_REF_ID (this_ref) = df->use_id;
821 df->uses[df->use_id++] = this_ref;
822 }
823 return this_ref;
824 }
825
826
827 /* Create a new reference of type DF_REF_TYPE for a single register REG,
828 used inside the LOC rtx of INSN. */
829 static void
830 df_ref_record_1 (df, reg, loc, insn, ref_type, ref_flags)
831 struct df *df;
832 rtx reg;
833 rtx *loc;
834 rtx insn;
835 enum df_ref_type ref_type;
836 enum df_ref_flags ref_flags;
837 {
838 df_ref_create (df, reg, loc, insn, ref_type, ref_flags);
839 }
840
841
842 /* Create new references of type DF_REF_TYPE for each part of register REG
843 at address LOC within INSN of BB. */
844 static void
845 df_ref_record (df, reg, loc, insn, ref_type, ref_flags)
846 struct df *df;
847 rtx reg;
848 rtx *loc;
849 rtx insn;
850 enum df_ref_type ref_type;
851 enum df_ref_flags ref_flags;
852 {
853 unsigned int regno;
854
855 if (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG)
856 abort ();
857
858 /* For the reg allocator we are interested in some SUBREG rtx's, but not
859 all. Notably only those representing a word extraction from a multi-word
860 reg. As written in the documentation those should have the form
861 (subreg:SI (reg:M A) N), with size(SImode) < size(Mmode).
862 XXX Is that true? We could also use the global word_mode variable. */
863 if (GET_CODE (reg) == SUBREG
864 && (GET_MODE_SIZE (GET_MODE (reg)) < GET_MODE_SIZE (word_mode)
865 || GET_MODE_SIZE (GET_MODE (reg))
866 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (reg)))))
867 {
868 loc = &SUBREG_REG (reg);
869 reg = *loc;
870 }
871
872 regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
873 if (regno < FIRST_PSEUDO_REGISTER)
874 {
875 int i;
876 int endregno;
877
878 if (! (df->flags & DF_HARD_REGS))
879 return;
880
881 /* GET_MODE (reg) is correct here. We don't want to go into a SUBREG
882 for the mode, because we only want to add references to regs, which
883 are really referenced. E.g. a (subreg:SI (reg:DI 0) 0) does _not_
884 reference the whole reg 0 in DI mode (which would also include
885 reg 1, at least, if 0 and 1 are SImode registers). */
886 endregno = HARD_REGNO_NREGS (regno, GET_MODE (reg));
887 if (GET_CODE (reg) == SUBREG)
888 regno += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)),
889 SUBREG_BYTE (reg), GET_MODE (reg));
890 endregno += regno;
891
892 for (i = regno; i < endregno; i++)
893 df_ref_record_1 (df, regno_reg_rtx[i],
894 loc, insn, ref_type, ref_flags);
895 }
896 else
897 {
898 df_ref_record_1 (df, reg, loc, insn, ref_type, ref_flags);
899 }
900 }
901
902 /* Writes to paradoxical subregs, or to subregs which are too narrow,
903 are read-modify-write. */
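/* For example, on a target where UNITS_PER_WORD is 4, with SImode 4 bytes,
   DImode 8 bytes and QImode 1 byte (illustrative sizes only):
   (subreg:DI (reg:SI ...) 0) is paradoxical and hence read-modify-write;
   (subreg:QI (reg:DI ...) 0) is narrower than a word of a multi-word
   register and is also read-modify-write; but (subreg:SI (reg:DI ...) 0)
   is a plain word extraction and is not.  */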
904
905 static inline bool
906 read_modify_subreg_p (x)
907 rtx x;
908 {
909 unsigned int isize, osize;
910 if (GET_CODE (x) != SUBREG)
911 return false;
912 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
913 osize = GET_MODE_SIZE (GET_MODE (x));
914 if (isize <= osize)
915 return true;
916 if (isize <= UNITS_PER_WORD)
917 return false;
918 if (osize >= UNITS_PER_WORD)
919 return false;
920 return true;
921 }
922
923 /* Process all the registers defined in the rtx, X. */
924 static void
925 df_def_record_1 (df, x, bb, insn)
926 struct df *df;
927 rtx x;
928 basic_block bb;
929 rtx insn;
930 {
931 rtx *loc = &SET_DEST (x);
932 rtx dst = *loc;
933 enum df_ref_flags flags = 0;
934
935 /* Some targets place small structures in registers for
936 return values of functions. */
937 if (GET_CODE (dst) == PARALLEL && GET_MODE (dst) == BLKmode)
938 {
939 int i;
940
941 for (i = XVECLEN (dst, 0) - 1; i >= 0; i--)
942 df_def_record_1 (df, XVECEXP (dst, 0, i), bb, insn);
943 return;
944 }
945
946 #ifdef CLASS_CANNOT_CHANGE_MODE
947 if (GET_CODE (dst) == SUBREG
948 && CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst),
949 GET_MODE (SUBREG_REG (dst))))
950 flags |= DF_REF_MODE_CHANGE;
951 #endif
952
953 /* Maybe we should flag the use of strict_low_part somehow. It might be
954 handy for the reg allocator. */
955 while (GET_CODE (dst) == STRICT_LOW_PART
956 || GET_CODE (dst) == ZERO_EXTRACT
957 || GET_CODE (dst) == SIGN_EXTRACT
958 || read_modify_subreg_p (dst))
959 {
960 /* A strict_low_part always contains a SUBREG, but we don't want it to
961 appear outside, as the whole register is always considered. */
962 if (GET_CODE (dst) == STRICT_LOW_PART)
963 {
964 loc = &XEXP (dst, 0);
965 dst = *loc;
966 }
967 #ifdef CLASS_CANNOT_CHANGE_MODE
968 if (GET_CODE (dst) == SUBREG
969 && CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst),
970 GET_MODE (SUBREG_REG (dst))))
971 flags |= DF_REF_MODE_CHANGE;
972 #endif
973 loc = &XEXP (dst, 0);
974 dst = *loc;
975 flags |= DF_REF_READ_WRITE;
976 }
977
978 if (GET_CODE (dst) == REG
979 || (GET_CODE (dst) == SUBREG && GET_CODE (SUBREG_REG (dst)) == REG))
980 df_ref_record (df, dst, loc, insn, DF_REF_REG_DEF, flags);
981 }
982
983
984 /* Process all the registers defined in the pattern rtx, X. */
985 static void
986 df_defs_record (df, x, bb, insn)
987 struct df *df;
988 rtx x;
989 basic_block bb;
990 rtx insn;
991 {
992 RTX_CODE code = GET_CODE (x);
993
994 if (code == SET || code == CLOBBER)
995 {
996 /* Mark the single def within the pattern. */
997 df_def_record_1 (df, x, bb, insn);
998 }
999 else if (code == PARALLEL)
1000 {
1001 int i;
1002
1003 /* Mark the multiple defs within the pattern. */
1004 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1005 {
1006 code = GET_CODE (XVECEXP (x, 0, i));
1007 if (code == SET || code == CLOBBER)
1008 df_def_record_1 (df, XVECEXP (x, 0, i), bb, insn);
1009 }
1010 }
1011 }
1012
1013
1014 /* Process all the registers used in the rtx at address LOC. */
1015 static void
1016 df_uses_record (df, loc, ref_type, bb, insn, flags)
1017 struct df *df;
1018 rtx *loc;
1019 enum df_ref_type ref_type;
1020 basic_block bb;
1021 rtx insn;
1022 enum df_ref_flags flags;
1023 {
1024 RTX_CODE code;
1025 rtx x;
1026 retry:
1027 x = *loc;
1028 if (!x)
1029 return;
1030 code = GET_CODE (x);
1031 switch (code)
1032 {
1033 case LABEL_REF:
1034 case SYMBOL_REF:
1035 case CONST_INT:
1036 case CONST:
1037 case CONST_DOUBLE:
1038 case CONST_VECTOR:
1039 case PC:
1040 case ADDR_VEC:
1041 case ADDR_DIFF_VEC:
1042 return;
1043
1044 case CLOBBER:
1045 /* If we are clobbering a MEM, mark any registers inside the address
1046 as being used. */
1047 if (GET_CODE (XEXP (x, 0)) == MEM)
1048 df_uses_record (df, &XEXP (XEXP (x, 0), 0),
1049 DF_REF_REG_MEM_STORE, bb, insn, flags);
1050
1051 /* If we're clobbering a REG then we have a def so ignore. */
1052 return;
1053
1054 case MEM:
1055 df_uses_record (df, &XEXP (x, 0), DF_REF_REG_MEM_LOAD, bb, insn, flags);
1056 return;
1057
1058 case SUBREG:
1059 /* While we're here, optimize this case. */
1060
1061 /* In case the SUBREG is not of a register, don't optimize. */
1062 if (GET_CODE (SUBREG_REG (x)) != REG)
1063 {
1064 loc = &SUBREG_REG (x);
1065 df_uses_record (df, loc, ref_type, bb, insn, flags);
1066 return;
1067 }
1068 #ifdef CLASS_CANNOT_CHANGE_MODE
1069 if (CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (x),
1070 GET_MODE (SUBREG_REG (x))))
1071 flags |= DF_REF_MODE_CHANGE;
1072 #endif
1073
1074 /* ... Fall through ... */
1075
1076 case REG:
1077 /* A register (or subreg) used in some context other than being set. */
1078 df_ref_record (df, x, loc, insn, ref_type, flags);
1079 return;
1080
1081 case SET:
1082 {
1083 rtx dst = SET_DEST (x);
1084
1085 df_uses_record (df, &SET_SRC (x), DF_REF_REG_USE, bb, insn, 0);
1086
1087 switch (GET_CODE (dst))
1088 {
1089 enum df_ref_flags use_flags;
1090 case SUBREG:
1091 if (read_modify_subreg_p (dst))
1092 {
1093 use_flags = DF_REF_READ_WRITE;
1094 #ifdef CLASS_CANNOT_CHANGE_MODE
1095 if (CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst),
1096 GET_MODE (SUBREG_REG (dst))))
1097 use_flags |= DF_REF_MODE_CHANGE;
1098 #endif
1099 df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb,
1100 insn, use_flags);
1101 break;
1102 }
1103 /* ... FALLTHRU ... */
1104 case REG:
1105 case PC:
1106 case PARALLEL:
1107 break;
1108 case MEM:
1109 df_uses_record (df, &XEXP (dst, 0),
1110 DF_REF_REG_MEM_STORE,
1111 bb, insn, 0);
1112 break;
1113 case STRICT_LOW_PART:
1114 /* A strict_low_part uses the whole reg not only the subreg. */
1115 dst = XEXP (dst, 0);
1116 if (GET_CODE (dst) != SUBREG)
1117 abort ();
1118 use_flags = DF_REF_READ_WRITE;
1119 #ifdef CLASS_CANNOT_CHANGE_MODE
1120 if (CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst),
1121 GET_MODE (SUBREG_REG (dst))))
1122 use_flags |= DF_REF_MODE_CHANGE;
1123 #endif
1124 df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb,
1125 insn, use_flags);
1126 break;
1127 case ZERO_EXTRACT:
1128 case SIGN_EXTRACT:
1129 df_uses_record (df, &XEXP (dst, 0), DF_REF_REG_USE, bb, insn,
1130 DF_REF_READ_WRITE);
1131 df_uses_record (df, &XEXP (dst, 1), DF_REF_REG_USE, bb, insn, 0);
1132 df_uses_record (df, &XEXP (dst, 2), DF_REF_REG_USE, bb, insn, 0);
1133 dst = XEXP (dst, 0);
1134 break;
1135 default:
1136 abort ();
1137 }
1138 return;
1139 }
1140
1141 case RETURN:
1142 break;
1143
1144 case ASM_OPERANDS:
1145 case UNSPEC_VOLATILE:
1146 case TRAP_IF:
1147 case ASM_INPUT:
1148 {
1149 /* Traditional and volatile asm instructions must be considered to use
1150 and clobber all hard registers, all pseudo-registers and all of
1151 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
1152
1153 Consider for instance a volatile asm that changes the fpu rounding
1154 mode. An insn should not be moved across this even if it only uses
1155 pseudo-regs because it might give an incorrectly rounded result.
1156
1157 For now, just mark any regs we can find in ASM_OPERANDS as
1158 used. */
1159
1160 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
1161 We cannot just fall through here since we would then be confused
1162 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
1163 a traditional asm, unlike its normal usage. */
1164 if (code == ASM_OPERANDS)
1165 {
1166 int j;
1167
1168 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
1169 df_uses_record (df, &ASM_OPERANDS_INPUT (x, j),
1170 DF_REF_REG_USE, bb, insn, 0);
1171 return;
1172 }
1173 break;
1174 }
1175
1176 case PRE_DEC:
1177 case POST_DEC:
1178 case PRE_INC:
1179 case POST_INC:
1180 case PRE_MODIFY:
1181 case POST_MODIFY:
1182 /* Catch the def of the register being modified. */
1183 df_ref_record (df, XEXP (x, 0), &XEXP (x, 0), insn, DF_REF_REG_DEF, DF_REF_READ_WRITE);
1184
1185 /* ... Fall through to handle uses ... */
1186
1187 default:
1188 break;
1189 }
1190
1191 /* Recursively scan the operands of this expression. */
1192 {
1193 const char *fmt = GET_RTX_FORMAT (code);
1194 int i;
1195
1196 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1197 {
1198 if (fmt[i] == 'e')
1199 {
1200 /* Tail recursive case: save a function call level. */
1201 if (i == 0)
1202 {
1203 loc = &XEXP (x, 0);
1204 goto retry;
1205 }
1206 df_uses_record (df, &XEXP (x, i), ref_type, bb, insn, flags);
1207 }
1208 else if (fmt[i] == 'E')
1209 {
1210 int j;
1211 for (j = 0; j < XVECLEN (x, i); j++)
1212 df_uses_record (df, &XVECEXP (x, i, j), ref_type,
1213 bb, insn, flags);
1214 }
1215 }
1216 }
1217 }
1218
1219
1220 /* Record all the refs within INSN of basic block BB. */
1221 static void
1222 df_insn_refs_record (df, bb, insn)
1223 struct df *df;
1224 basic_block bb;
1225 rtx insn;
1226 {
1227 int i;
1228
1229 if (INSN_P (insn))
1230 {
1231 rtx note;
1232
1233 /* Record register defs */
1234 df_defs_record (df, PATTERN (insn), bb, insn);
1235
1236 if (df->flags & DF_EQUIV_NOTES)
1237 for (note = REG_NOTES (insn); note;
1238 note = XEXP (note, 1))
1239 {
1240 switch (REG_NOTE_KIND (note))
1241 {
1242 case REG_EQUIV:
1243 case REG_EQUAL:
1244 df_uses_record (df, &XEXP (note, 0), DF_REF_REG_USE,
1245 bb, insn, 0);
1246 default:
1247 break;
1248 }
1249 }
1250
1251 if (GET_CODE (insn) == CALL_INSN)
1252 {
1253 rtx note;
1254 rtx x;
1255
1256 /* Record the registers used to pass arguments. */
1257 for (note = CALL_INSN_FUNCTION_USAGE (insn); note;
1258 note = XEXP (note, 1))
1259 {
1260 if (GET_CODE (XEXP (note, 0)) == USE)
1261 df_uses_record (df, &XEXP (XEXP (note, 0), 0), DF_REF_REG_USE,
1262 bb, insn, 0);
1263 }
1264
1265 /* The stack ptr is used (honorarily) by a CALL insn. */
1266 x = df_reg_use_gen (STACK_POINTER_REGNUM);
1267 df_uses_record (df, &XEXP (x, 0), DF_REF_REG_USE, bb, insn, 0);
1268
1269 if (df->flags & DF_HARD_REGS)
1270 {
1271 /* Calls may also reference any of the global registers,
1272 so they are recorded as used. */
1273 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1274 if (global_regs[i])
1275 {
1276 x = df_reg_use_gen (i);
1277 df_uses_record (df, &SET_DEST (x),
1278 DF_REF_REG_USE, bb, insn, 0);
1279 }
1280 }
1281 }
1282
1283 /* Record the register uses. */
1284 df_uses_record (df, &PATTERN (insn),
1285 DF_REF_REG_USE, bb, insn, 0);
1286
1287
1288 if (GET_CODE (insn) == CALL_INSN)
1289 {
1290 rtx note;
1291
1292 if (df->flags & DF_HARD_REGS)
1293 {
1294 /* Kill all registers invalidated by a call. */
1295 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1296 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
1297 {
1298 rtx reg_clob = df_reg_clobber_gen (i);
1299 df_defs_record (df, reg_clob, bb, insn);
1300 }
1301 }
1302
1303 /* There may be extra registers to be clobbered. */
1304 for (note = CALL_INSN_FUNCTION_USAGE (insn);
1305 note;
1306 note = XEXP (note, 1))
1307 if (GET_CODE (XEXP (note, 0)) == CLOBBER)
1308 df_defs_record (df, XEXP (note, 0), bb, insn);
1309 }
1310 }
1311 }
1312
1313
1314 /* Record all the refs within the basic block BB. */
1315 static void
1316 df_bb_refs_record (df, bb)
1317 struct df *df;
1318 basic_block bb;
1319 {
1320 rtx insn;
1321
1322 /* Scan the block an insn at a time from beginning to end. */
1323 for (insn = bb->head; ; insn = NEXT_INSN (insn))
1324 {
1325 if (INSN_P (insn))
1326 {
1327 /* Record defs within INSN. */
1328 df_insn_refs_record (df, bb, insn);
1329 }
1330 if (insn == bb->end)
1331 break;
1332 }
1333 }
1334
1335
1336 /* Record all the refs in the basic blocks specified by BLOCKS. */
1337 static void
1338 df_refs_record (df, blocks)
1339 struct df *df;
1340 bitmap blocks;
1341 {
1342 basic_block bb;
1343
1344 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1345 {
1346 df_bb_refs_record (df, bb);
1347 });
1348 }
1349 \f
1350 /* Dataflow analysis routines. */
1351
1352
1353 /* Create reg-def chains for basic block BB. These are a list of
1354 definitions for each register. */
1355 static void
1356 df_bb_reg_def_chain_create (df, bb)
1357 struct df *df;
1358 basic_block bb;
1359 {
1360 rtx insn;
1361
1362 /* Perhaps the defs should be sorted using a depth first search
1363 of the CFG (or possibly a breadth first search). We currently
1364 scan the insns of the basic block in reverse order so that the first
1365 defs appear at the start of the chain. */
1366
1367 for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
1368 insn = PREV_INSN (insn))
1369 {
1370 struct df_link *link;
1371 unsigned int uid = INSN_UID (insn);
1372
1373 if (! INSN_P (insn))
1374 continue;
1375
1376 for (link = df->insns[uid].defs; link; link = link->next)
1377 {
1378 struct ref *def = link->ref;
1379 unsigned int dregno = DF_REF_REGNO (def);
1380 /* Don't add refs to the chain twice, i.e. only add
1381 new refs. XXX The same could be done by testing if the current
1382 insn is a modified (or a new) one. This would be faster. */
1383 if (DF_REF_ID (def) < df->def_id_save)
1384 continue;
1385
1386 df->regs[dregno].defs
1387 = df_link_create (def, df->regs[dregno].defs);
1388 }
1389 }
1390 }
1391
1392
1393 /* Create reg-def chains for each basic block within BLOCKS. These
1394 are a list of definitions for each register. */
1395 static void
1396 df_reg_def_chain_create (df, blocks)
1397 struct df *df;
1398 bitmap blocks;
1399 {
1400 basic_block bb;
1401
1402 FOR_EACH_BB_IN_BITMAP/*_REV*/ (blocks, 0, bb,
1403 {
1404 df_bb_reg_def_chain_create (df, bb);
1405 });
1406 }
1407
1408
1409 /* Create reg-use chains for basic block BB. These are a list of uses
1410 for each register. */
1411 static void
1412 df_bb_reg_use_chain_create (df, bb)
1413 struct df *df;
1414 basic_block bb;
1415 {
1416 rtx insn;
1417
1418 /* Scan in forward order so that the last uses appear at the
1419 start of the chain. */
1420
1421 for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
1422 insn = NEXT_INSN (insn))
1423 {
1424 struct df_link *link;
1425 unsigned int uid = INSN_UID (insn);
1426
1427 if (! INSN_P (insn))
1428 continue;
1429
1430 for (link = df->insns[uid].uses; link; link = link->next)
1431 {
1432 struct ref *use = link->ref;
1433 unsigned int uregno = DF_REF_REGNO (use);
1434 /* Don't add refs to the chain twice, i.e. only add
1435 new refs. XXX The same could be done by testing if the current
1436 insn is a modified (or a new) one. This would be faster. */
1437 if (DF_REF_ID (use) < df->use_id_save)
1438 continue;
1439
1440 df->regs[uregno].uses
1441 = df_link_create (use, df->regs[uregno].uses);
1442 }
1443 }
1444 }
1445
1446
1447 /* Create reg-use chains for each basic block within BLOCKS. These
1448 are a list of uses for each register. */
1449 static void
1450 df_reg_use_chain_create (df, blocks)
1451 struct df *df;
1452 bitmap blocks;
1453 {
1454 basic_block bb;
1455
1456 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1457 {
1458 df_bb_reg_use_chain_create (df, bb);
1459 });
1460 }
1461
1462
1463 /* Create def-use chains from reaching use bitmaps for basic block BB. */
1464 static void
1465 df_bb_du_chain_create (df, bb, ru)
1466 struct df *df;
1467 basic_block bb;
1468 bitmap ru;
1469 {
1470 struct bb_info *bb_info = DF_BB_INFO (df, bb);
1471 rtx insn;
1472
1473 bitmap_copy (ru, bb_info->ru_out);
1474
1475 /* For each def in BB create a linked list (chain) of uses
1476 reached from the def. */
1477 for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
1478 insn = PREV_INSN (insn))
1479 {
1480 struct df_link *def_link;
1481 struct df_link *use_link;
1482 unsigned int uid = INSN_UID (insn);
1483
1484 if (! INSN_P (insn))
1485 continue;
1486
1487 /* For each def in insn... */
1488 for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
1489 {
1490 struct ref *def = def_link->ref;
1491 unsigned int dregno = DF_REF_REGNO (def);
1492
1493 DF_REF_CHAIN (def) = 0;
1494
1495 /* While the reg-use chains are not essential, it
1496 is _much_ faster to search these short lists rather
1497 than all the reaching uses, especially for large functions. */
1498 for (use_link = df->regs[dregno].uses; use_link;
1499 use_link = use_link->next)
1500 {
1501 struct ref *use = use_link->ref;
1502
1503 if (bitmap_bit_p (ru, DF_REF_ID (use)))
1504 {
1505 DF_REF_CHAIN (def)
1506 = df_link_create (use, DF_REF_CHAIN (def));
1507
1508 bitmap_clear_bit (ru, DF_REF_ID (use));
1509 }
1510 }
1511 }
1512
1513 /* For each use in insn... */
1514 for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next)
1515 {
1516 struct ref *use = use_link->ref;
1517 bitmap_set_bit (ru, DF_REF_ID (use));
1518 }
1519 }
1520 }
1521
1522
1523 /* Create def-use chains from reaching use bitmaps for basic blocks
1524 in BLOCKS. */
1525 static void
1526 df_du_chain_create (df, blocks)
1527 struct df *df;
1528 bitmap blocks;
1529 {
1530 bitmap ru;
1531 basic_block bb;
1532
1533 ru = BITMAP_XMALLOC ();
1534
1535 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1536 {
1537 df_bb_du_chain_create (df, bb, ru);
1538 });
1539
1540 BITMAP_XFREE (ru);
1541 }
1542
1543
1544 /* Create use-def chains from reaching def bitmaps for basic block BB. */
1545 static void
1546 df_bb_ud_chain_create (df, bb)
1547 struct df *df;
1548 basic_block bb;
1549 {
1550 struct bb_info *bb_info = DF_BB_INFO (df, bb);
1551 struct ref **reg_def_last = df->reg_def_last;
1552 rtx insn;
1553
1554 memset (reg_def_last, 0, df->n_regs * sizeof (struct ref *));
1555
1556 /* For each use in BB create a linked list (chain) of defs
1557 that reach the use. */
1558 for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
1559 insn = NEXT_INSN (insn))
1560 {
1561 unsigned int uid = INSN_UID (insn);
1562 struct df_link *use_link;
1563 struct df_link *def_link;
1564
1565 if (! INSN_P (insn))
1566 continue;
1567
1568 /* For each use in insn... */
1569 for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next)
1570 {
1571 struct ref *use = use_link->ref;
1572 unsigned int regno = DF_REF_REGNO (use);
1573
1574 DF_REF_CHAIN (use) = 0;
1575
1576 /* Has regno been defined in this BB yet? If so, use
1577 the last def as the single entry for the use-def
1578 chain for this use. Otherwise, we need to add all
1579 the defs using this regno that reach the start of
1580 this BB. */
1581 if (reg_def_last[regno])
1582 {
1583 DF_REF_CHAIN (use)
1584 = df_link_create (reg_def_last[regno], 0);
1585 }
1586 else
1587 {
1588 /* While the reg-def chains are not essential, it is
1589 _much_ faster to search these short lists rather than
1590 all the reaching defs, especially for large
1591 functions. */
1592 for (def_link = df->regs[regno].defs; def_link;
1593 def_link = def_link->next)
1594 {
1595 struct ref *def = def_link->ref;
1596
1597 if (bitmap_bit_p (bb_info->rd_in, DF_REF_ID (def)))
1598 {
1599 DF_REF_CHAIN (use)
1600 = df_link_create (def, DF_REF_CHAIN (use));
1601 }
1602 }
1603 }
1604 }
1605
1606
1607 /* For each def in insn...record the last def of each reg. */
1608 for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
1609 {
1610 struct ref *def = def_link->ref;
1611 int dregno = DF_REF_REGNO (def);
1612
1613 reg_def_last[dregno] = def;
1614 }
1615 }
1616 }
1617
1618
1619 /* Create use-def chains from reaching def bitmaps for basic blocks
1620 within BLOCKS. */
1621 static void
1622 df_ud_chain_create (df, blocks)
1623 struct df *df;
1624 bitmap blocks;
1625 {
1626 basic_block bb;
1627
1628 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1629 {
1630 df_bb_ud_chain_create (df, bb);
1631 });
1632 }
1633 \f
1634
1635
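/* Transfer functions for the iterative dataflow solver.
   bitmap_union_of_diff (DST, A, B, C) computes DST = A | (B & ~C) and
   returns nonzero if DST changed, so these implement:

      reaching defs (forward):    out = gen | (in  & ~kill)
      reaching uses (backward):   in  = gen | (out & ~kill)
      live registers (backward):  in  = use | (out & ~def)  */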
1636 static void
1637 df_rd_transfer_function (bb, changed, in, out, gen, kill, data)
1638 int bb ATTRIBUTE_UNUSED;
1639 int *changed;
1640 bitmap in, out, gen, kill;
1641 void *data ATTRIBUTE_UNUSED;
1642 {
1643 *changed = bitmap_union_of_diff (out, gen, in, kill);
1644 }
1645 static void
1646 df_ru_transfer_function (bb, changed, in, out, gen, kill, data)
1647 int bb ATTRIBUTE_UNUSED;
1648 int *changed;
1649 bitmap in, out, gen, kill;
1650 void *data ATTRIBUTE_UNUSED;
1651 {
1652 *changed = bitmap_union_of_diff (in, gen, out, kill);
1653 }
1654
1655 static void
1656 df_lr_transfer_function (bb, changed, in, out, use, def, data)
1657 int bb ATTRIBUTE_UNUSED;
1658 int *changed;
1659 bitmap in, out, use, def;
1660 void *data ATTRIBUTE_UNUSED;
1661 {
1662 *changed = bitmap_union_of_diff (in, use, out, def);
1663 }
1664
1665
1666 /* Compute local reaching def info for basic block BB. */
1667 static void
1668 df_bb_rd_local_compute (df, bb)
1669 struct df *df;
1670 basic_block bb;
1671 {
1672 struct bb_info *bb_info = DF_BB_INFO (df, bb);
1673 rtx insn;
1674
1675 for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
1676 insn = NEXT_INSN (insn))
1677 {
1678 unsigned int uid = INSN_UID (insn);
1679 struct df_link *def_link;
1680
1681 if (! INSN_P (insn))
1682 continue;
1683
1684 for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
1685 {
1686 struct ref *def = def_link->ref;
1687 unsigned int regno = DF_REF_REGNO (def);
1688 struct df_link *def2_link;
1689
1690 for (def2_link = df->regs[regno].defs; def2_link;
1691 def2_link = def2_link->next)
1692 {
1693 struct ref *def2 = def2_link->ref;
1694
1695 /* Add all defs of this reg to the set of kills. This
1696 is greedy since many of these defs will not actually
1697 be killed by this BB but it keeps things a lot
1698 simpler. */
1699 bitmap_set_bit (bb_info->rd_kill, DF_REF_ID (def2));
1700
1701 /* Zap from the set of gens for this BB. */
1702 bitmap_clear_bit (bb_info->rd_gen, DF_REF_ID (def2));
1703 }
1704
1705 bitmap_set_bit (bb_info->rd_gen, DF_REF_ID (def));
1706 }
1707 }
1708
1709 bb_info->rd_valid = 1;
1710 }
1711
1712
1713 /* Compute local reaching def info for each basic block within BLOCKS. */
1714 static void
1715 df_rd_local_compute (df, blocks)
1716 struct df *df;
1717 bitmap blocks;
1718 {
1719 basic_block bb;
1720
1721 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1722 {
1723 df_bb_rd_local_compute (df, bb);
1724 });
1725 }
1726
1727
1728 /* Compute local reaching use (upward exposed use) info for basic
1729 block BB. */
1730 static void
1731 df_bb_ru_local_compute (df, bb)
1732 struct df *df;
1733 basic_block bb;
1734 {
1735 /* This is much more tricky than computing reaching defs. With
1736 reaching defs, defs get killed by other defs. With upwards
1737 exposed uses, these get killed by defs with the same regno. */
1738
1739 struct bb_info *bb_info = DF_BB_INFO (df, bb);
1740 rtx insn;
1741
1742
1743 for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
1744 insn = PREV_INSN (insn))
1745 {
1746 unsigned int uid = INSN_UID (insn);
1747 struct df_link *def_link;
1748 struct df_link *use_link;
1749
1750 if (! INSN_P (insn))
1751 continue;
1752
1753 for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
1754 {
1755 struct ref *def = def_link->ref;
1756 unsigned int dregno = DF_REF_REGNO (def);
1757
1758 for (use_link = df->regs[dregno].uses; use_link;
1759 use_link = use_link->next)
1760 {
1761 struct ref *use = use_link->ref;
1762
1763 /* Add all uses of this reg to the set of kills. This
1764 is greedy since many of these uses will not actually
1765 be killed by this BB but it keeps things a lot
1766 simpler. */
1767 bitmap_set_bit (bb_info->ru_kill, DF_REF_ID (use));
1768
1769 /* Zap from the set of gens for this BB. */
1770 bitmap_clear_bit (bb_info->ru_gen, DF_REF_ID (use));
1771 }
1772 }
1773
1774 for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next)
1775 {
1776 struct ref *use = use_link->ref;
1777 /* Add use to set of gens in this BB. */
1778 bitmap_set_bit (bb_info->ru_gen, DF_REF_ID (use));
1779 }
1780 }
1781 bb_info->ru_valid = 1;
1782 }
1783
1784
1785 /* Compute local reaching use (upward exposed use) info for each basic
1786 block within BLOCKS. */
1787 static void
1788 df_ru_local_compute (df, blocks)
1789 struct df *df;
1790 bitmap blocks;
1791 {
1792 basic_block bb;
1793
1794 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1795 {
1796 df_bb_ru_local_compute (df, bb);
1797 });
1798 }
1799
1800
1801 /* Compute local live variable info for basic block BB. */
1802 static void
1803 df_bb_lr_local_compute (df, bb)
1804 struct df *df;
1805 basic_block bb;
1806 {
1807 struct bb_info *bb_info = DF_BB_INFO (df, bb);
1808 rtx insn;
1809
1810 for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
1811 insn = PREV_INSN (insn))
1812 {
1813 unsigned int uid = INSN_UID (insn);
1814 struct df_link *link;
1815
1816 if (! INSN_P (insn))
1817 continue;
1818
1819 for (link = df->insns[uid].defs; link; link = link->next)
1820 {
1821 struct ref *def = link->ref;
1822 unsigned int dregno = DF_REF_REGNO (def);
1823
1824 /* Add def to set of defs in this BB. */
1825 bitmap_set_bit (bb_info->lr_def, dregno);
1826
1827 bitmap_clear_bit (bb_info->lr_use, dregno);
1828 }
1829
1830 for (link = df->insns[uid].uses; link; link = link->next)
1831 {
1832 struct ref *use = link->ref;
1833 /* Add use to set of uses in this BB. */
1834 bitmap_set_bit (bb_info->lr_use, DF_REF_REGNO (use));
1835 }
1836 }
1837 bb_info->lr_valid = 1;
1838 }
1839
1840
1841 /* Compute local live variable info for each basic block within BLOCKS. */
1842 static void
1843 df_lr_local_compute (df, blocks)
1844 struct df *df;
1845 bitmap blocks;
1846 {
1847 basic_block bb;
1848
1849 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1850 {
1851 df_bb_lr_local_compute (df, bb);
1852 });
1853 }
1854
1855
1856 /* Compute register info: lifetime, bb, and number of defs and uses
1857 for basic block BB. */
1858 static void
1859 df_bb_reg_info_compute (df, bb, live)
1860 struct df *df;
1861 basic_block bb;
1862 bitmap live;
1863 {
1864 struct reg_info *reg_info = df->regs;
1865 struct bb_info *bb_info = DF_BB_INFO (df, bb);
1866 rtx insn;
1867
1868 bitmap_copy (live, bb_info->lr_out);
1869
1870 for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
1871 insn = PREV_INSN (insn))
1872 {
1873 unsigned int uid = INSN_UID (insn);
1874 unsigned int regno;
1875 struct df_link *link;
1876
1877 if (! INSN_P (insn))
1878 continue;
1879
1880 for (link = df->insns[uid].defs; link; link = link->next)
1881 {
1882 struct ref *def = link->ref;
1883 unsigned int dregno = DF_REF_REGNO (def);
1884
1885 /* Kill this register. */
1886 bitmap_clear_bit (live, dregno);
1887 reg_info[dregno].n_defs++;
1888 }
1889
1890 for (link = df->insns[uid].uses; link; link = link->next)
1891 {
1892 struct ref *use = link->ref;
1893 unsigned int uregno = DF_REF_REGNO (use);
1894
1895 /* This register is now live. */
1896 bitmap_set_bit (live, uregno);
1897 reg_info[uregno].n_uses++;
1898 }
1899
1900 /* Increment lifetimes of all live registers. */
1901 EXECUTE_IF_SET_IN_BITMAP (live, 0, regno,
1902 {
1903 reg_info[regno].lifetime++;
1904 });
1905 }
1906 }
1907
1908
1909 /* Compute register info: lifetime, bb, and number of defs and uses. */
1910 static void
1911 df_reg_info_compute (df, blocks)
1912 struct df *df;
1913 bitmap blocks;
1914 {
1915 basic_block bb;
1916 bitmap live;
1917
1918 live = BITMAP_XMALLOC ();
1919
1920 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1921 {
1922 df_bb_reg_info_compute (df, bb, live);
1923 });
1924
1925 BITMAP_XFREE (live);
1926 }
1927
1928
1929 /* Assign LUIDs for BB. */
1930 static int
1931 df_bb_luids_set (df, bb)
1932 struct df *df;
1933 basic_block bb;
1934 {
1935 rtx insn;
1936 int luid = 0;
1937
1938 /* The LUIDs are monotonically increasing for each basic block. */
1939
1940 for (insn = bb->head; ; insn = NEXT_INSN (insn))
1941 {
1942 if (INSN_P (insn))
1943 DF_INSN_LUID (df, insn) = luid++;
1944 DF_INSN_LUID (df, insn) = luid;
1945
1946 if (insn == bb->end)
1947 break;
1948 }
1949 return luid;
1950 }
1951
1952
1953 /* Assign LUIDs for each basic block within BLOCKS. */
1954 static int
1955 df_luids_set (df, blocks)
1956 struct df *df;
1957 bitmap blocks;
1958 {
1959 basic_block bb;
1960 int total = 0;
1961
1962 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1963 {
1964 total += df_bb_luids_set (df, bb);
1965 });
1966 return total;
1967 }
1968
1969 /* Perform dataflow analysis using existing DF structure for blocks
1970 within BLOCKS. If BLOCKS is zero, use all basic blocks in the CFG. */
1971 static void
1972 df_analyse_1 (df, blocks, flags, update)
1973 struct df *df;
1974 bitmap blocks;
1975 int flags;
1976 int update;
1977 {
1978 int aflags;
1979 int dflags;
1980 int i;
1981 basic_block bb;
1982
1983 dflags = 0;
1984 aflags = flags;
1985 if (flags & DF_UD_CHAIN)
1986 aflags |= DF_RD | DF_RD_CHAIN;
1987
1988 if (flags & DF_DU_CHAIN)
1989 aflags |= DF_RU;
1990
1991 if (flags & DF_RU)
1992 aflags |= DF_RU_CHAIN;
1993
1994 if (flags & DF_REG_INFO)
1995 aflags |= DF_LR;
1996
1997 if (! blocks)
1998 blocks = df->all_blocks;
1999
2000 df->flags = flags;
2001 if (update)
2002 {
2003 df_refs_update (df);
2004 /* More fine grained incremental dataflow analysis would be
2005 nice. For now recompute the whole shebang for the
2006 modified blocks. */
2007 #if 0
2008 df_refs_unlink (df, blocks);
2009 #endif
2010 /* All the def-use, use-def chains can be potentially
2011 modified by changes in one block. The size of the
2012 bitmaps can also change. */
2013 }
2014 else
2015 {
2016 /* Scan the function for all register defs and uses. */
2017 df_refs_queue (df);
2018 df_refs_record (df, blocks);
2019
2020 /* Link all the new defs and uses to the insns. */
2021 df_refs_process (df);
2022 }
2023
2024 /* Allocate the bitmaps now that the total number of defs and uses is
2025 known.  If the number of defs or uses has changed, then
2026 these bitmaps need to be reallocated.  */
2027 df_bitmaps_alloc (df, aflags);
2028
2029 /* Set the LUIDs for each specified basic block. */
2030 df_luids_set (df, blocks);
2031
2032 /* Recreate reg-def and reg-use chains from scratch so that first
2033 def is at the head of the reg-def chain and the last use is at
2034 the head of the reg-use chain. This is only important for
2035 regs local to a basic block as it speeds up searching. */
2036 if (aflags & DF_RD_CHAIN)
2037 {
2038 df_reg_def_chain_create (df, blocks);
2039 }
2040
2041 if (aflags & DF_RU_CHAIN)
2042 {
2043 df_reg_use_chain_create (df, blocks);
2044 }
2045
2046 df->dfs_order = xmalloc (sizeof (int) * n_basic_blocks);
2047 df->rc_order = xmalloc (sizeof (int) * n_basic_blocks);
2048 df->rts_order = xmalloc (sizeof (int) * n_basic_blocks);
2049 df->inverse_dfs_map = xmalloc (sizeof (int) * last_basic_block);
2050 df->inverse_rc_map = xmalloc (sizeof (int) * last_basic_block);
2051 df->inverse_rts_map = xmalloc (sizeof (int) * last_basic_block);
2052
2053 flow_depth_first_order_compute (df->dfs_order, df->rc_order);
2054 flow_reverse_top_sort_order_compute (df->rts_order);
2055 for (i = 0; i < n_basic_blocks; i++)
2056 {
2057 df->inverse_dfs_map[df->dfs_order[i]] = i;
2058 df->inverse_rc_map[df->rc_order[i]] = i;
2059 df->inverse_rts_map[df->rts_order[i]] = i;
2060 }
2061 if (aflags & DF_RD)
2062 {
2063 /* Compute the sets of gens and kills for the defs of each bb. */
2064 df_rd_local_compute (df, df->flags & DF_RD ? blocks : df->all_blocks);
2065 {
2066 bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
2067 bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
2068 bitmap *gen = xmalloc (sizeof (bitmap) * last_basic_block);
2069 bitmap *kill = xmalloc (sizeof (bitmap) * last_basic_block);
2070 FOR_EACH_BB (bb)
2071 {
2072 in[bb->index] = DF_BB_INFO (df, bb)->rd_in;
2073 out[bb->index] = DF_BB_INFO (df, bb)->rd_out;
2074 gen[bb->index] = DF_BB_INFO (df, bb)->rd_gen;
2075 kill[bb->index] = DF_BB_INFO (df, bb)->rd_kill;
2076 }
2077 iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
2078 FORWARD, UNION, df_rd_transfer_function,
2079 df->inverse_rc_map, NULL);
2080 free (in);
2081 free (out);
2082 free (gen);
2083 free (kill);
2084 }
2085 }
2086
2087 if (aflags & DF_UD_CHAIN)
2088 {
2089 /* Create use-def chains. */
2090 df_ud_chain_create (df, df->all_blocks);
2091
2092 if (! (flags & DF_RD))
2093 dflags |= DF_RD;
2094 }
2095
2096 if (aflags & DF_RU)
2097 {
2098 /* Compute the sets of gens and kills for the upwards exposed
2099 uses in each bb. */
2100 df_ru_local_compute (df, df->flags & DF_RU ? blocks : df->all_blocks);
2101 {
2102 bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
2103 bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
2104 bitmap *gen = xmalloc (sizeof (bitmap) * last_basic_block);
2105 bitmap *kill = xmalloc (sizeof (bitmap) * last_basic_block);
2106 FOR_EACH_BB (bb)
2107 {
2108 in[bb->index] = DF_BB_INFO (df, bb)->ru_in;
2109 out[bb->index] = DF_BB_INFO (df, bb)->ru_out;
2110 gen[bb->index] = DF_BB_INFO (df, bb)->ru_gen;
2111 kill[bb->index] = DF_BB_INFO (df, bb)->ru_kill;
2112 }
2113 iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
2114 BACKWARD, UNION, df_ru_transfer_function,
2115 df->inverse_rts_map, NULL);
2116 free (in);
2117 free (out);
2118 free (gen);
2119 free (kill);
2120 }
2121 }
2122
2123 if (aflags & DF_DU_CHAIN)
2124 {
2125 /* Create def-use chains. */
2126 df_du_chain_create (df, df->all_blocks);
2127
2128 if (! (flags & DF_RU))
2129 dflags |= DF_RU;
2130 }
2131
2132 /* Free up bitmaps that are no longer required. */
2133 if (dflags)
2134 df_bitmaps_free (df, dflags);
2135
2136 if (aflags & DF_LR)
2137 {
2138 /* Compute the sets of defs and uses of live variables. */
2139 df_lr_local_compute (df, df->flags & DF_LR ? blocks : df->all_blocks);
2140 {
2141 bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
2142 bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
2143 bitmap *use = xmalloc (sizeof (bitmap) * last_basic_block);
2144 bitmap *def = xmalloc (sizeof (bitmap) * last_basic_block);
2145 FOR_EACH_BB (bb)
2146 {
2147 in[bb->index] = DF_BB_INFO (df, bb)->lr_in;
2148 out[bb->index] = DF_BB_INFO (df, bb)->lr_out;
2149 use[bb->index] = DF_BB_INFO (df, bb)->lr_use;
2150 def[bb->index] = DF_BB_INFO (df, bb)->lr_def;
2151 }
2152 iterative_dataflow_bitmap (in, out, use, def, df->all_blocks,
2153 BACKWARD, UNION, df_lr_transfer_function,
2154 df->inverse_rts_map, NULL);
2155 free (in);
2156 free (out);
2157 free (use);
2158 free (def);
2159 }
2160 }
2161
2162 if (aflags & DF_REG_INFO)
2163 {
2164 df_reg_info_compute (df, df->all_blocks);
2165 }
2166 free (df->dfs_order);
2167 free (df->rc_order);
2168 free (df->rts_order);
2169 free (df->inverse_rc_map);
2170 free (df->inverse_dfs_map);
2171 free (df->inverse_rts_map);
2172 }
2173
2174
2175 /* Initialize dataflow analysis. */
2176 struct df *
2177 df_init ()
2178 {
2179 struct df *df;
2180
2181 df = xcalloc (1, sizeof (struct df));
2182
2183 /* Squirrel away a global for debugging. */
2184 ddf = df;
2185
2186 return df;
2187 }
2188
2189
2190 /* Start queuing refs. */
2191 static int
2192 df_refs_queue (df)
2193 struct df *df;
2194 {
2195 df->def_id_save = df->def_id;
2196 df->use_id_save = df->use_id;
2197 /* ???? Perhaps we should save current obstack state so that we can
2198 unwind it. */
2199 return 0;
2200 }
2201
2202
2203 /* Process queued refs. */
2204 static int
2205 df_refs_process (df)
2206 struct df *df;
2207 {
2208 unsigned int i;
2209
2210 /* Build new insn-def chains. */
2211 for (i = df->def_id_save; i != df->def_id; i++)
2212 {
2213 struct ref *def = df->defs[i];
2214 unsigned int uid = DF_REF_INSN_UID (def);
2215
2216 /* Add def to head of def list for INSN. */
2217 df->insns[uid].defs
2218 = df_link_create (def, df->insns[uid].defs);
2219 }
2220
2221 /* Build new insn-use chains. */
2222 for (i = df->use_id_save; i != df->use_id; i++)
2223 {
2224 struct ref *use = df->uses[i];
2225 unsigned int uid = DF_REF_INSN_UID (use);
2226
2227 /* Add use to head of use list for INSN. */
2228 df->insns[uid].uses
2229 = df_link_create (use, df->insns[uid].uses);
2230 }
2231 return 0;
2232 }
2233
2234
2235 /* Update refs for basic block BB. */
2236 static int
2237 df_bb_refs_update (df, bb)
2238 struct df *df;
2239 basic_block bb;
2240 {
2241 rtx insn;
2242 int count = 0;
2243
2244 /* While we have to scan the chain of insns for this BB, we don't
2245 need to allocate and queue a long chain of BB/INSN pairs. Using
2246 a bitmap for insns_modified saves memory and avoids queuing
2247 duplicates. */
2248
2249 for (insn = bb->head; ; insn = NEXT_INSN (insn))
2250 {
2251 unsigned int uid;
2252
2253 uid = INSN_UID (insn);
2254
2255 if (bitmap_bit_p (df->insns_modified, uid))
2256 {
2257 /* Delete any allocated refs of this insn. MPH, FIXME. */
2258 df_insn_refs_unlink (df, bb, insn);
2259
2260 /* Scan the insn for refs. */
2261 df_insn_refs_record (df, bb, insn);
2262
2263 count++;
2264 }
2265 if (insn == bb->end)
2266 break;
2267 }
2268 return count;
2269 }
2270
2271
2272 /* Process all the modified/deleted insns that were queued. */
2273 static int
2274 df_refs_update (df)
2275 struct df *df;
2276 {
2277 basic_block bb;
2278 int count = 0;
2279
2280 if ((unsigned int) max_reg_num () >= df->reg_size)
2281 df_reg_table_realloc (df, 0);
2282
2283 df_refs_queue (df);
2284
2285 FOR_EACH_BB_IN_BITMAP (df->bbs_modified, 0, bb,
2286 {
2287 count += df_bb_refs_update (df, bb);
2288 });
2289
2290 df_refs_process (df);
2291 return count;
2292 }
2293
2294
2295 /* Return nonzero if any of the requested blocks in the bitmap
2296 BLOCKS have been modified. */
2297 static int
2298 df_modified_p (df, blocks)
2299 struct df *df;
2300 bitmap blocks;
2301 {
2302 int update = 0;
2303 basic_block bb;
2304
2305 if (!df->n_bbs)
2306 return 0;
2307
2308 FOR_EACH_BB (bb)
2309 if (bitmap_bit_p (df->bbs_modified, bb->index)
2310 && (! blocks || (blocks == (bitmap) -1) || bitmap_bit_p (blocks, bb->index)))
2311 {
2312 update = 1;
2313 break;
2314 }
2315
2316 return update;
2317 }
2318
2319
2320 /* Analyse dataflow info for the basic blocks specified by the bitmap
2321 BLOCKS, or for the whole CFG if BLOCKS is zero, or just for the
2322 modified blocks if BLOCKS is -1. */
2323 int
2324 df_analyse (df, blocks, flags)
2325 struct df *df;
2326 bitmap blocks;
2327 int flags;
2328 {
2329 int update;
2330
2331 /* We could deal with additional basic blocks being created by
2332 rescanning everything again. */
2333 if (df->n_bbs && df->n_bbs != (unsigned int) last_basic_block)
2334 abort ();
2335
2336 update = df_modified_p (df, blocks);
2337 if (update || (flags != df->flags))
2338 {
2339 if (! blocks)
2340 {
2341 if (df->n_bbs)
2342 {
2343 /* Recompute everything from scratch. */
2344 df_free (df);
2345 }
2346 /* Allocate and initialize data structures. */
2347 df_alloc (df, max_reg_num ());
2348 df_analyse_1 (df, 0, flags, 0);
2349 update = 1;
2350 }
2351 else
2352 {
2353 if (blocks == (bitmap) -1)
2354 blocks = df->bbs_modified;
2355
2356 if (! df->n_bbs)
2357 abort ();
2358
2359 df_analyse_1 (df, blocks, flags, 1);
2360 bitmap_zero (df->bbs_modified);
2361 bitmap_zero (df->insns_modified);
2362 }
2363 }
2364 return update;
2365 }
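
/* For example, a pass that has already run df_analyse once and has since
   deleted or rewritten some insns might refresh the information for just
   the affected blocks along these lines (a sketch only; BB, INSN and the
   flags shown are the caller's own):

	df_insn_delete (df, bb, insn);
	...
	df_analyse (df, (bitmap) -1, DF_RD | DF_UD_CHAIN);

   Passing -1 for BLOCKS restricts the recomputation to the blocks
   recorded in df->bbs_modified.  */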
2366
2367
2368 /* Free all the dataflow info and the DF structure. */
2369 void
2370 df_finish (df)
2371 struct df *df;
2372 {
2373 df_free (df);
2374 free (df);
2375 }
2376
2377
2378 /* Unlink INSN from its reference information. */
2379 static void
2380 df_insn_refs_unlink (df, bb, insn)
2381 struct df *df;
2382 basic_block bb ATTRIBUTE_UNUSED;
2383 rtx insn;
2384 {
2385 struct df_link *link;
2386 unsigned int uid;
2387
2388 uid = INSN_UID (insn);
2389
2390 /* Unlink all refs defined by this insn. */
2391 for (link = df->insns[uid].defs; link; link = link->next)
2392 df_def_unlink (df, link->ref);
2393
2394 /* Unlink all refs used by this insn. */
2395 for (link = df->insns[uid].uses; link; link = link->next)
2396 df_use_unlink (df, link->ref);
2397
2398 df->insns[uid].defs = 0;
2399 df->insns[uid].uses = 0;
2400 }
2401
2402
2403 #if 0
2404 /* Unlink all the insns within BB from their reference information. */
2405 static void
2406 df_bb_refs_unlink (df, bb)
2407 struct df *df;
2408 basic_block bb;
2409 {
2410 rtx insn;
2411
2412 /* Scan the block an insn at a time from beginning to end. */
2413 for (insn = bb->head; ; insn = NEXT_INSN (insn))
2414 {
2415 if (INSN_P (insn))
2416 {
2417 /* Unlink refs for INSN. */
2418 df_insn_refs_unlink (df, bb, insn);
2419 }
2420 if (insn == bb->end)
2421 break;
2422 }
2423 }
2424
2425
2426 /* Unlink all the refs in the basic blocks specified by BLOCKS.
2427 Not currently used. */
2428 static void
2429 df_refs_unlink (df, blocks)
2430 struct df *df;
2431 bitmap blocks;
2432 {
2433 basic_block bb;
2434
2435 if (blocks)
2436 {
2437 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
2438 {
2439 df_bb_refs_unlink (df, bb);
2440 });
2441 }
2442 else
2443 {
2444 FOR_EACH_BB (bb)
2445 df_bb_refs_unlink (df, bb);
2446 }
2447 }
2448 #endif
2449 \f
2450 /* Functions to modify insns. */
2451
2452
2453 /* Delete INSN and all its reference information. */
2454 rtx
2455 df_insn_delete (df, bb, insn)
2456 struct df *df;
2457 basic_block bb ATTRIBUTE_UNUSED;
2458 rtx insn;
2459 {
2460 /* If the insn is a jump, we should perhaps call delete_insn to
2461 handle the JUMP_LABEL? */
2462
2463 /* We should not be deleting the NOTE_INSN_BASIC_BLOCK or label. */
2464 if (insn == bb->head)
2465 abort ();
2466
2467 /* Delete the insn. */
2468 delete_insn (insn);
2469
2470 df_insn_modify (df, bb, insn);
2471
2472 return NEXT_INSN (insn);
2473 }
2474
2475
2476 /* Mark that INSN within BB may have changed (created/modified/deleted).
2477 This may be called multiple times for the same insn. There is no
2478 harm calling this function if the insn wasn't changed; it will just
2479 slow down the rescanning of refs. */
2480 void
2481 df_insn_modify (df, bb, insn)
2482 struct df *df;
2483 basic_block bb;
2484 rtx insn;
2485 {
2486 unsigned int uid;
2487
2488 uid = INSN_UID (insn);
2489 if (uid >= df->insn_size)
2490 df_insn_table_realloc (df, uid);
2491
2492 bitmap_set_bit (df->bbs_modified, bb->index);
2493 bitmap_set_bit (df->insns_modified, uid);
2494
2495 /* For incremental updating on the fly, perhaps we could make a copy
2496 of all the refs of the original insn and turn them into
2497 anti-refs. When df_refs_update finds these anti-refs, it annihilates
2498 the original refs. If validate_change fails then these anti-refs
2499 will just get ignored. */
2500 }
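
/* For example, a pass that rewrites an insn in place might do
   (a sketch only; NEW_PAT is the caller's own replacement pattern):

	if (validate_change (insn, &PATTERN (insn), new_pat, 0))
	  df_insn_modify (df, bb, insn);

   so that the next call to df_analyse rescans the refs of this insn.  */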
2501
2502
2503 typedef struct replace_args {
2504 rtx match;
2505 rtx replacement;
2506 rtx insn;
2507 int modified;
2508 } replace_args;
2509
2510
2511 /* Replace mem pointed to by PX with its associated pseudo register.
2512 DATA is actually a pointer to a structure describing the
2513 instruction currently being scanned and the MEM we are currently
2514 replacing. */
2515 static int
2516 df_rtx_mem_replace (px, data)
2517 rtx *px;
2518 void *data;
2519 {
2520 replace_args *args = (replace_args *) data;
2521 rtx mem = *px;
2522
2523 if (mem == NULL_RTX)
2524 return 0;
2525
2526 switch (GET_CODE (mem))
2527 {
2528 case MEM:
2529 break;
2530
2531 case CONST_DOUBLE:
2532 /* We're not interested in the MEM associated with a
2533 CONST_DOUBLE, so there's no need to traverse into one. */
2534 return -1;
2535
2536 default:
2537 /* This is not a MEM. */
2538 return 0;
2539 }
2540
2541 if (!rtx_equal_p (args->match, mem))
2542 /* This is not the MEM we are currently replacing. */
2543 return 0;
2544
2545 /* Actually replace the MEM. */
2546 validate_change (args->insn, px, args->replacement, 1);
2547 args->modified++;
2548
2549 return 0;
2550 }
2551
2552
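/* Replace every MEM within INSN (in BB) that matches MEM with register
   REG, queuing the changes with validate_change.  Return the number of
   replacements made; INSN is marked modified if this is nonzero.  */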
2553 int
2554 df_insn_mem_replace (df, bb, insn, mem, reg)
2555 struct df *df;
2556 basic_block bb;
2557 rtx insn;
2558 rtx mem;
2559 rtx reg;
2560 {
2561 replace_args args;
2562
2563 args.insn = insn;
2564 args.match = mem;
2565 args.replacement = reg;
2566 args.modified = 0;
2567
2568 /* Search and replace all matching mems within insn. */
2569 for_each_rtx (&insn, df_rtx_mem_replace, &args);
2570
2571 if (args.modified)
2572 df_insn_modify (df, bb, insn);
2573
2574 /* ???? FIXME. We may have a new def or one or more new uses of REG
2575 in INSN. REG should be a new pseudo so it won't affect the
2576 dataflow information that we currently have. We should add
2577 the new uses and defs to INSN and then recreate the chains
2578 when df_analyse is called. */
2579 return args.modified;
2580 }
2581
2582
2583 /* Replace one register with another. Called through for_each_rtx; PX
2584 points to the rtx being scanned. DATA is actually a pointer to a
2585 structure of arguments. */
2586 static int
2587 df_rtx_reg_replace (px, data)
2588 rtx *px;
2589 void *data;
2590 {
2591 rtx x = *px;
2592 replace_args *args = (replace_args *) data;
2593
2594 if (x == NULL_RTX)
2595 return 0;
2596
2597 if (x == args->match)
2598 {
2599 validate_change (args->insn, px, args->replacement, 1);
2600 args->modified++;
2601 }
2602
2603 return 0;
2604 }
2605
2606
2607 /* Replace the reg within every ref on CHAIN that is within the set
2608 BLOCKS of basic blocks with NEWREG. Also update the regs within
2609 REG_NOTES. */
2610 void
2611 df_refs_reg_replace (df, blocks, chain, oldreg, newreg)
2612 struct df *df;
2613 bitmap blocks;
2614 struct df_link *chain;
2615 rtx oldreg;
2616 rtx newreg;
2617 {
2618 struct df_link *link;
2619 replace_args args;
2620
2621 if (! blocks)
2622 blocks = df->all_blocks;
2623
2624 args.match = oldreg;
2625 args.replacement = newreg;
2626 args.modified = 0;
2627
2628 for (link = chain; link; link = link->next)
2629 {
2630 struct ref *ref = link->ref;
2631 rtx insn = DF_REF_INSN (ref);
2632
2633 if (! INSN_P (insn))
2634 continue;
2635
2636 if (bitmap_bit_p (blocks, DF_REF_BBNO (ref)))
2637 {
2638 df_ref_reg_replace (df, ref, oldreg, newreg);
2639
2640 /* Replace occurrences of the reg within the REG_NOTES. */
2641 if ((! link->next || DF_REF_INSN (ref)
2642 != DF_REF_INSN (link->next->ref))
2643 && REG_NOTES (insn))
2644 {
2645 args.insn = insn;
2646 for_each_rtx (&REG_NOTES (insn), df_rtx_reg_replace, &args);
2647 }
2648 }
2649 else
2650 {
2651 /* Temporary check to ensure that we have a grip on which
2652 regs should be replaced. */
2653 abort ();
2654 }
2655 }
2656 }
2657
2658
2659 /* Replace all occurrences of register OLDREG with register NEWREG in
2660 blocks defined by bitmap BLOCKS. This also replaces occurrences of
2661 OLDREG in the REG_NOTES but only for insns containing OLDREG. This
2662 routine expects the reg-use and reg-def chains to be valid. */
2663 int
2664 df_reg_replace (df, blocks, oldreg, newreg)
2665 struct df *df;
2666 bitmap blocks;
2667 rtx oldreg;
2668 rtx newreg;
2669 {
2670 unsigned int oldregno = REGNO (oldreg);
2671
2672 df_refs_reg_replace (df, blocks, df->regs[oldregno].defs, oldreg, newreg);
2673 df_refs_reg_replace (df, blocks, df->regs[oldregno].uses, oldreg, newreg);
2674 return 1;
2675 }
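
/* For instance, once df_analyse has been run with DF_RD_CHAIN and
   DF_RU_CHAIN in the flags, a pass that has created a fresh pseudo could
   rename a register throughout a region like this (a sketch only;
   OLDREG, NEWREG and BLOCKS are the caller's own):

	df_reg_replace (df, blocks, oldreg, newreg);

   The replacements are only queued via validate_change, so committing
   them (e.g. with apply_change_group) is left to the caller.  */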
2676
2677
2678 /* Try replacing the reg within REF with NEWREG. Do not modify
2679 def-use/use-def chains. */
2680 int
2681 df_ref_reg_replace (df, ref, oldreg, newreg)
2682 struct df *df;
2683 struct ref *ref;
2684 rtx oldreg;
2685 rtx newreg;
2686 {
2687 /* Check that insn was deleted by being converted into a NOTE. If
2688 so ignore this insn. */
2689 if (! INSN_P (DF_REF_INSN (ref)))
2690 return 0;
2691
2692 if (oldreg && oldreg != DF_REF_REG (ref))
2693 abort ();
2694
2695 if (! validate_change (DF_REF_INSN (ref), DF_REF_LOC (ref), newreg, 1))
2696 return 0;
2697
2698 df_insn_modify (df, DF_REF_BB (ref), DF_REF_INSN (ref));
2699 return 1;
2700 }
2701
2702
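/* Within BB, remove the last use of REGNO from USE_INSN and move the
   first def of REGNO from DEF_INSN to USE_INSN.  Return the moved def,
   or 0 if either the def or the use cannot be found.  */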
2703 struct ref*
2704 df_bb_def_use_swap (df, bb, def_insn, use_insn, regno)
2705 struct df * df;
2706 basic_block bb;
2707 rtx def_insn;
2708 rtx use_insn;
2709 unsigned int regno;
2710 {
2711 struct ref *def;
2712 struct ref *use;
2713 int def_uid;
2714 int use_uid;
2715 struct df_link *link;
2716
2717 def = df_bb_insn_regno_first_def_find (df, bb, def_insn, regno);
2718 if (! def)
2719 return 0;
2720
2721 use = df_bb_insn_regno_last_use_find (df, bb, use_insn, regno);
2722 if (! use)
2723 return 0;
2724
2725 /* The USE no longer exists. */
2726 use_uid = INSN_UID (use_insn);
2727 df_use_unlink (df, use);
2728 df_ref_unlink (&df->insns[use_uid].uses, use);
2729
2730 /* The DEF requires shifting so remove it from DEF_INSN
2731 and add it to USE_INSN by reusing LINK. */
2732 def_uid = INSN_UID (def_insn);
2733 link = df_ref_unlink (&df->insns[def_uid].defs, def);
2734 link->ref = def;
2735 link->next = df->insns[use_uid].defs;
2736 df->insns[use_uid].defs = link;
2737
2738 #if 0
2739 link = df_ref_unlink (&df->regs[regno].defs, def);
2740 link->ref = def;
2741 link->next = df->regs[regno].defs;
2742 df->regs[regno].defs = link;
2743 #endif
2744
2745 DF_REF_INSN (def) = use_insn;
2746 return def;
2747 }
2748
2749
2750 /* Mark each insn from FIRST_INSN to LAST_INSN inclusive as modified.
2751 All new insns must be processed by this routine.  */
2752 static void
2753 df_insns_modify (df, bb, first_insn, last_insn)
2754 struct df *df;
2755 basic_block bb;
2756 rtx first_insn;
2757 rtx last_insn;
2758 {
2759 rtx insn;
2760
2761 for (insn = first_insn; ; insn = NEXT_INSN (insn))
2762 {
2763 unsigned int uid;
2764
2765 /* A non-const call should not have slipped through the net. If
2766 it does, we need to create a new basic block. Ouch. The
2767 same applies for a label. */
2768 if ((GET_CODE (insn) == CALL_INSN
2769 && ! CONST_OR_PURE_CALL_P (insn))
2770 || GET_CODE (insn) == CODE_LABEL)
2771 abort ();
2772
2773 uid = INSN_UID (insn);
2774
2775 if (uid >= df->insn_size)
2776 df_insn_table_realloc (df, uid);
2777
2778 df_insn_modify (df, bb, insn);
2779
2780 if (insn == last_insn)
2781 break;
2782 }
2783 }
2784
2785
2786 /* Emit PATTERN before INSN within BB. */
2787 rtx
2788 df_pattern_emit_before (df, pattern, bb, insn)
2789 struct df *df ATTRIBUTE_UNUSED;
2790 rtx pattern;
2791 basic_block bb;
2792 rtx insn;
2793 {
2794 rtx ret_insn;
2795 rtx prev_insn = PREV_INSN (insn);
2796
2797 /* We should not be inserting before the start of the block. */
2798 if (insn == bb->head)
2799 abort ();
2800 ret_insn = emit_insn_before (pattern, insn);
2801 if (ret_insn == insn)
2802 return ret_insn;
2803
2804 df_insns_modify (df, bb, NEXT_INSN (prev_insn), ret_insn);
2805 return ret_insn;
2806 }
2807
2808
2809 /* Emit PATTERN after INSN within BB. */
2810 rtx
2811 df_pattern_emit_after (df, pattern, bb, insn)
2812 struct df *df;
2813 rtx pattern;
2814 basic_block bb;
2815 rtx insn;
2816 {
2817 rtx ret_insn;
2818
2819 ret_insn = emit_insn_after (pattern, insn);
2820 if (ret_insn == insn)
2821 return ret_insn;
2822
2823 df_insns_modify (df, bb, NEXT_INSN (insn), ret_insn);
2824 return ret_insn;
2825 }
2826
2827
2828 /* Emit jump PATTERN after INSN within BB. */
2829 rtx
2830 df_jump_pattern_emit_after (df, pattern, bb, insn)
2831 struct df *df;
2832 rtx pattern;
2833 basic_block bb;
2834 rtx insn;
2835 {
2836 rtx ret_insn;
2837
2838 ret_insn = emit_jump_insn_after (pattern, insn);
2839 if (ret_insn == insn)
2840 return ret_insn;
2841
2842 df_insns_modify (df, bb, NEXT_INSN (insn), ret_insn);
2843 return ret_insn;
2844 }
2845
2846
2847 /* Move INSN within BB before BEFORE_INSN within BEFORE_BB.
2848
2849 This function should only be used to move loop invariant insns
2850 out of a loop where it has been proven that the def-use info
2851 will still be valid. */
2852 rtx
2853 df_insn_move_before (df, bb, insn, before_bb, before_insn)
2854 struct df *df;
2855 basic_block bb;
2856 rtx insn;
2857 basic_block before_bb;
2858 rtx before_insn;
2859 {
2860 struct df_link *link;
2861 unsigned int uid;
2862
2863 if (! bb)
2864 return df_pattern_emit_before (df, insn, before_bb, before_insn);
2865
2866 uid = INSN_UID (insn);
2867
2868 /* Change the BB recorded for all the refs defined and used by this insn.  */
2869 for (link = df->insns[uid].defs; link; link = link->next)
2870 DF_REF_BB (link->ref) = before_bb;
2871 for (link = df->insns[uid].uses; link; link = link->next)
2872 DF_REF_BB (link->ref) = before_bb;
2873
2874 /* The lifetimes of the registers used in this insn will be reduced
2875 while the lifetimes of the registers defined in this insn
2876 are likely to be increased. */
2877
2878 /* ???? Perhaps all the insns moved should be stored on a list
2879 which df_analyse removes when it recalculates data flow. */
2880
2881 return emit_insn_before (insn, before_insn);
2882 }
2883 \f
2884 /* Functions to query dataflow information. */
2885
2886
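/* Return nonzero if INSN contains a def of REGNO.  */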
2887 int
2888 df_insn_regno_def_p (df, bb, insn, regno)
2889 struct df *df;
2890 basic_block bb ATTRIBUTE_UNUSED;
2891 rtx insn;
2892 unsigned int regno;
2893 {
2894 unsigned int uid;
2895 struct df_link *link;
2896
2897 uid = INSN_UID (insn);
2898
2899 for (link = df->insns[uid].defs; link; link = link->next)
2900 {
2901 struct ref *def = link->ref;
2902
2903 if (DF_REF_REGNO (def) == regno)
2904 return 1;
2905 }
2906
2907 return 0;
2908 }
2909
2910
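/* Return nonzero if DEF is the only def reaching each of the uses on
   its def-use chain.  */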
2911 static int
2912 df_def_dominates_all_uses_p (df, def)
2913 struct df *df ATTRIBUTE_UNUSED;
2914 struct ref *def;
2915 {
2916 struct df_link *du_link;
2917
2918 /* Follow def-use chain to find all the uses of this def. */
2919 for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next)
2920 {
2921 struct ref *use = du_link->ref;
2922 struct df_link *ud_link;
2923
2924 /* Follow use-def chain to check all the defs for this use. */
2925 for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next)
2926 if (ud_link->ref != def)
2927 return 0;
2928 }
2929 return 1;
2930 }
2931
2932
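/* Return nonzero if every def within INSN dominates all of its uses.  */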
2933 int
2934 df_insn_dominates_all_uses_p (df, bb, insn)
2935 struct df *df;
2936 basic_block bb ATTRIBUTE_UNUSED;
2937 rtx insn;
2938 {
2939 unsigned int uid;
2940 struct df_link *link;
2941
2942 uid = INSN_UID (insn);
2943
2944 for (link = df->insns[uid].defs; link; link = link->next)
2945 {
2946 struct ref *def = link->ref;
2947
2948 if (! df_def_dominates_all_uses_p (df, def))
2949 return 0;
2950 }
2951
2952 return 1;
2953 }
2954
2955
2956 /* Return nonzero if DEF dominates all its uses within the bitmap
2957 BLOCKS.  */
2958 static int
2959 df_def_dominates_uses_p (df, def, blocks)
2960 struct df *df ATTRIBUTE_UNUSED;
2961 struct ref *def;
2962 bitmap blocks;
2963 {
2964 struct df_link *du_link;
2965
2966 /* Follow def-use chain to find all the uses of this def. */
2967 for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next)
2968 {
2969 struct ref *use = du_link->ref;
2970 struct df_link *ud_link;
2971
2972 /* Only worry about the uses within BLOCKS. For example,
2973 consider a register defined within a loop that is live at the
2974 loop exits. */
2975 if (bitmap_bit_p (blocks, DF_REF_BBNO (use)))
2976 {
2977 /* Follow use-def chain to check all the defs for this use. */
2978 for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next)
2979 if (ud_link->ref != def)
2980 return 0;
2981 }
2982 }
2983 return 1;
2984 }
2985
2986
2987 /* Return nonzero if all the defs of INSN within BB dominate
2988 all their corresponding uses within the bitmap BLOCKS.  */
2989 int
2990 df_insn_dominates_uses_p (df, bb, insn, blocks)
2991 struct df *df;
2992 basic_block bb ATTRIBUTE_UNUSED;
2993 rtx insn;
2994 bitmap blocks;
2995 {
2996 unsigned int uid;
2997 struct df_link *link;
2998
2999 uid = INSN_UID (insn);
3000
3001 for (link = df->insns[uid].defs; link; link = link->next)
3002 {
3003 struct ref *def = link->ref;
3004
3005 /* Only consider the defs within BLOCKS. */
3006 if (bitmap_bit_p (blocks, DF_REF_BBNO (def))
3007 && ! df_def_dominates_uses_p (df, def, blocks))
3008 return 0;
3009 }
3010 return 1;
3011 }
3012
3013
3014 /* Return the basic block that REG is referenced in, or NULL if it is
3015 referenced in multiple basic blocks.  */
3016 basic_block
3017 df_regno_bb (df, regno)
3018 struct df *df;
3019 unsigned int regno;
3020 {
3021 struct df_link *defs = df->regs[regno].defs;
3022 struct df_link *uses = df->regs[regno].uses;
3023 struct ref *def = defs ? defs->ref : 0;
3024 struct ref *use = uses ? uses->ref : 0;
3025 basic_block bb_def = def ? DF_REF_BB (def) : 0;
3026 basic_block bb_use = use ? DF_REF_BB (use) : 0;
3027
3028 /* Compare blocks of first def and last use. ???? FIXME. What if
3029 the reg-def and reg-use lists are not correctly ordered. */
3030 return bb_def == bb_use ? bb_def : 0;
3031 }
3032
3033
3034 /* Return nonzero if REG used in multiple basic blocks. */
3035 int
3036 df_reg_global_p (df, reg)
3037 struct df *df;
3038 rtx reg;
3039 {
3040 return df_regno_bb (df, REGNO (reg)) != 0;
3041 }
3042
3043
3044 /* Return total lifetime (in insns) of REG. */
3045 int
3046 df_reg_lifetime (df, reg)
3047 struct df *df;
3048 rtx reg;
3049 {
3050 return df->regs[REGNO (reg)].lifetime;
3051 }
3052
3053
3054 /* Return nonzero if REG live at start of BB. */
3055 int
3056 df_bb_reg_live_start_p (df, bb, reg)
3057 struct df *df ATTRIBUTE_UNUSED;
3058 basic_block bb;
3059 rtx reg;
3060 {
3061 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3062
3063 #ifdef ENABLE_CHECKING
3064 if (! bb_info->lr_in)
3065 abort ();
3066 #endif
3067
3068 return bitmap_bit_p (bb_info->lr_in, REGNO (reg));
3069 }
3070
3071
3072 /* Return nonzero if REG live at end of BB. */
3073 int
3074 df_bb_reg_live_end_p (df, bb, reg)
3075 struct df *df ATTRIBUTE_UNUSED;
3076 basic_block bb;
3077 rtx reg;
3078 {
3079 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3080
3081 #ifdef ENABLE_CHECKING
3082 if (! bb_info->lr_in)
3083 abort ();
3084 #endif
3085
3086 return bitmap_bit_p (bb_info->lr_out, REGNO (reg));
3087 }
3088
3089
3090 /* Return -1 if the life of REG1 is before the life of REG2, 1 if the
3091 life of REG1 is after the life of REG2, or 0 if the lives overlap.  */
3092 int
3093 df_bb_regs_lives_compare (df, bb, reg1, reg2)
3094 struct df *df;
3095 basic_block bb;
3096 rtx reg1;
3097 rtx reg2;
3098 {
3099 unsigned int regno1 = REGNO (reg1);
3100 unsigned int regno2 = REGNO (reg2);
3101 struct ref *def1;
3102 struct ref *use1;
3103 struct ref *def2;
3104 struct ref *use2;
3105
3106
3107 /* The regs must be local to BB. */
3108 if (df_regno_bb (df, regno1) != bb
3109 || df_regno_bb (df, regno2) != bb)
3110 abort ();
3111
3112 def2 = df_bb_regno_first_def_find (df, bb, regno2);
3113 use1 = df_bb_regno_last_use_find (df, bb, regno1);
3114
3115 if (DF_INSN_LUID (df, DF_REF_INSN (def2))
3116 > DF_INSN_LUID (df, DF_REF_INSN (use1)))
3117 return -1;
3118
3119 def1 = df_bb_regno_first_def_find (df, bb, regno1);
3120 use2 = df_bb_regno_last_use_find (df, bb, regno2);
3121
3122 if (DF_INSN_LUID (df, DF_REF_INSN (def1))
3123 > DF_INSN_LUID (df, DF_REF_INSN (use2)))
3124 return 1;
3125
3126 return 0;
3127 }
3128
3129
3130 /* Return last use of REGNO within BB. */
3131 static struct ref *
3132 df_bb_regno_last_use_find (df, bb, regno)
3133 struct df * df;
3134 basic_block bb ATTRIBUTE_UNUSED;
3135 unsigned int regno;
3136 {
3137 struct df_link *link;
3138
3139 /* This assumes that the reg-use list is ordered such that for any
3140 BB, the last use is found first. However, since the BBs are not
3141 ordered, the first use in the chain is not necessarily the last
3142 use in the function. */
3143 for (link = df->regs[regno].uses; link; link = link->next)
3144 {
3145 struct ref *use = link->ref;
3146
3147 if (DF_REF_BB (use) == bb)
3148 return use;
3149 }
3150 return 0;
3151 }
3152
3153
3154 /* Return first def of REGNO within BB. */
3155 static struct ref *
3156 df_bb_regno_first_def_find (df, bb, regno)
3157 struct df * df;
3158 basic_block bb ATTRIBUTE_UNUSED;
3159 unsigned int regno;
3160 {
3161 struct df_link *link;
3162
3163 /* This assumes that the reg-def list is ordered such that for any
3164 BB, the first def is found first. However, since the BBs are not
3165 ordered, the first def in the chain is not necessarily the first
3166 def in the function. */
3167 for (link = df->regs[regno].defs; link; link = link->next)
3168 {
3169 struct ref *def = link->ref;
3170
3171 if (DF_REF_BB (def) == bb)
3172 return def;
3173 }
3174 return 0;
3175 }
3176
3177
3178 /* Return last use of REGNO inside INSN within BB.  */
3179 static struct ref *
3180 df_bb_insn_regno_last_use_find (df, bb, insn, regno)
3181 struct df * df;
3182 basic_block bb ATTRIBUTE_UNUSED;
3183 rtx insn;
3184 unsigned int regno;
3185 {
3186 unsigned int uid;
3187 struct df_link *link;
3188
3189 uid = INSN_UID (insn);
3190
3191 for (link = df->insns[uid].uses; link; link = link->next)
3192 {
3193 struct ref *use = link->ref;
3194
3195 if (DF_REF_REGNO (use) == regno)
3196 return use;
3197 }
3198
3199 return 0;
3200 }
3201
3202
3203 /* Return first def of REGNO inside INSN within BB. */
3204 static struct ref *
3205 df_bb_insn_regno_first_def_find (df, bb, insn, regno)
3206 struct df * df;
3207 basic_block bb ATTRIBUTE_UNUSED;
3208 rtx insn;
3209 unsigned int regno;
3210 {
3211 unsigned int uid;
3212 struct df_link *link;
3213
3214 uid = INSN_UID (insn);
3215
3216 for (link = df->insns[uid].defs; link; link = link->next)
3217 {
3218 struct ref *def = link->ref;
3219
3220 if (DF_REF_REGNO (def) == regno)
3221 return def;
3222 }
3223
3224 return 0;
3225 }
3226
3227
3228 /* Return insn using REG if the BB contains only a single
3229 use and def of REG. */
3230 rtx
3231 df_bb_single_def_use_insn_find (df, bb, insn, reg)
3232 struct df * df;
3233 basic_block bb;
3234 rtx insn;
3235 rtx reg;
3236 {
3237 struct ref *def;
3238 struct ref *use;
3239 struct df_link *du_link;
3240
3241 def = df_bb_insn_regno_first_def_find (df, bb, insn, REGNO (reg));
3242
3243 if (! def)
3244 abort ();
3245
3246 du_link = DF_REF_CHAIN (def);
3247
3248 if (! du_link)
3249 return NULL_RTX;
3250
3251 use = du_link->ref;
3252
3253 /* Check if def is dead. */
3254 if (! use)
3255 return NULL_RTX;
3256
3257 /* Check for multiple uses. */
3258 if (du_link->next)
3259 return NULL_RTX;
3260
3261 return DF_REF_INSN (use);
3262 }
3263 \f
3264 /* Functions for debugging/dumping dataflow information. */
3265
3266
3267 /* Dump a def-use or use-def chain for REF to FILE. */
3268 static void
3269 df_chain_dump (link, file)
3270 struct df_link *link;
3271 FILE *file;
3272 {
3273 fprintf (file, "{ ");
3274 for (; link; link = link->next)
3275 {
3276 fprintf (file, "%c%d ",
3277 DF_REF_REG_DEF_P (link->ref) ? 'd' : 'u',
3278 DF_REF_ID (link->ref));
3279 }
3280 fprintf (file, "}");
3281 }
3282
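/* Dump a def-use or use-def chain for REF to FILE, including the regno
   of each ref.  */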
3283 static void
3284 df_chain_dump_regno (link, file)
3285 struct df_link *link;
3286 FILE *file;
3287 {
3288 fprintf (file, "{ ");
3289 for (; link; link = link->next)
3290 {
3291 fprintf (file, "%c%d(%d) ",
3292 DF_REF_REG_DEF_P (link->ref) ? 'd' : 'u',
3293 DF_REF_ID (link->ref),
3294 DF_REF_REGNO (link->ref));
3295 }
3296 fprintf (file, "}");
3297 }
3298
3299 /* Dump dataflow info. */
3300 void
3301 df_dump (df, flags, file)
3302 struct df *df;
3303 int flags;
3304 FILE *file;
3305 {
3306 unsigned int j;
3307 basic_block bb;
3308
3309 if (! df || ! file)
3310 return;
3311
3312 fprintf (file, "\nDataflow summary:\n");
3313 fprintf (file, "n_regs = %d, n_defs = %d, n_uses = %d, n_bbs = %d\n",
3314 df->n_regs, df->n_defs, df->n_uses, df->n_bbs);
3315
3316 if (flags & DF_RD)
3317 {
3318 basic_block bb;
3319
3320 fprintf (file, "Reaching defs:\n");
3321 FOR_EACH_BB (bb)
3322 {
3323 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3324
3325 if (! bb_info->rd_in)
3326 continue;
3327
3328 fprintf (file, "bb %d in \t", bb->index);
3329 dump_bitmap (file, bb_info->rd_in);
3330 fprintf (file, "bb %d gen \t", bb->index);
3331 dump_bitmap (file, bb_info->rd_gen);
3332 fprintf (file, "bb %d kill\t", bb->index);
3333 dump_bitmap (file, bb_info->rd_kill);
3334 fprintf (file, "bb %d out \t", bb->index);
3335 dump_bitmap (file, bb_info->rd_out);
3336 }
3337 }
3338
3339 if (flags & DF_UD_CHAIN)
3340 {
3341 fprintf (file, "Use-def chains:\n");
3342 for (j = 0; j < df->n_defs; j++)
3343 {
3344 if (df->defs[j])
3345 {
3346 fprintf (file, "d%d bb %d luid %d insn %d reg %d ",
3347 j, DF_REF_BBNO (df->defs[j]),
3348 DF_INSN_LUID (df, DF_REF_INSN (df->defs[j])),
3349 DF_REF_INSN_UID (df->defs[j]),
3350 DF_REF_REGNO (df->defs[j]));
3351 if (df->defs[j]->flags & DF_REF_READ_WRITE)
3352 fprintf (file, "read/write ");
3353 df_chain_dump (DF_REF_CHAIN (df->defs[j]), file);
3354 fprintf (file, "\n");
3355 }
3356 }
3357 }
3358
3359 if (flags & DF_RU)
3360 {
3361 fprintf (file, "Reaching uses:\n");
3362 FOR_EACH_BB (bb)
3363 {
3364 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3365
3366 if (! bb_info->ru_in)
3367 continue;
3368
3369 fprintf (file, "bb %d in \t", bb->index);
3370 dump_bitmap (file, bb_info->ru_in);
3371 fprintf (file, "bb %d gen \t", bb->index);
3372 dump_bitmap (file, bb_info->ru_gen);
3373 fprintf (file, "bb %d kill\t", bb->index);
3374 dump_bitmap (file, bb_info->ru_kill);
3375 fprintf (file, "bb %d out \t", bb->index);
3376 dump_bitmap (file, bb_info->ru_out);
3377 }
3378 }
3379
3380 if (flags & DF_DU_CHAIN)
3381 {
3382 fprintf (file, "Def-use chains:\n");
3383 for (j = 0; j < df->n_uses; j++)
3384 {
3385 if (df->uses[j])
3386 {
3387 fprintf (file, "u%d bb %d luid %d insn %d reg %d ",
3388 j, DF_REF_BBNO (df->uses[j]),
3389 DF_INSN_LUID (df, DF_REF_INSN (df->uses[j])),
3390 DF_REF_INSN_UID (df->uses[j]),
3391 DF_REF_REGNO (df->uses[j]));
3392 if (df->uses[j]->flags & DF_REF_READ_WRITE)
3393 fprintf (file, "read/write ");
3394 df_chain_dump (DF_REF_CHAIN (df->uses[j]), file);
3395 fprintf (file, "\n");
3396 }
3397 }
3398 }
3399
3400 if (flags & DF_LR)
3401 {
3402 fprintf (file, "Live regs:\n");
3403 FOR_EACH_BB (bb)
3404 {
3405 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3406
3407 if (! bb_info->lr_in)
3408 continue;
3409
3410 fprintf (file, "bb %d in \t", bb->index);
3411 dump_bitmap (file, bb_info->lr_in);
3412 fprintf (file, "bb %d use \t", bb->index);
3413 dump_bitmap (file, bb_info->lr_use);
3414 fprintf (file, "bb %d def \t", bb->index);
3415 dump_bitmap (file, bb_info->lr_def);
3416 fprintf (file, "bb %d out \t", bb->index);
3417 dump_bitmap (file, bb_info->lr_out);
3418 }
3419 }
3420
3421 if (flags & (DF_REG_INFO | DF_RD_CHAIN | DF_RU_CHAIN))
3422 {
3423 struct reg_info *reg_info = df->regs;
3424
3425 fprintf (file, "Register info:\n");
3426 for (j = 0; j < df->n_regs; j++)
3427 {
3428 if (((flags & DF_REG_INFO)
3429 && (reg_info[j].n_uses || reg_info[j].n_defs))
3430 || ((flags & DF_RD_CHAIN) && reg_info[j].defs)
3431 || ((flags & DF_RU_CHAIN) && reg_info[j].uses))
3432 {
3433 fprintf (file, "reg %d", j);
3434 if ((flags & DF_RD_CHAIN) && (flags & DF_RU_CHAIN))
3435 {
3436 basic_block bb = df_regno_bb (df, j);
3437
3438 if (bb)
3439 fprintf (file, " bb %d", bb->index);
3440 else
3441 fprintf (file, " bb ?");
3442 }
3443 if (flags & DF_REG_INFO)
3444 {
3445 fprintf (file, " life %d", reg_info[j].lifetime);
3446 }
3447
3448 if ((flags & DF_REG_INFO) || (flags & DF_RD_CHAIN))
3449 {
3450 fprintf (file, " defs ");
3451 if (flags & DF_REG_INFO)
3452 fprintf (file, "%d ", reg_info[j].n_defs);
3453 if (flags & DF_RD_CHAIN)
3454 df_chain_dump (reg_info[j].defs, file);
3455 }
3456
3457 if ((flags & DF_REG_INFO) || (flags & DF_RU_CHAIN))
3458 {
3459 fprintf (file, " uses ");
3460 if (flags & DF_REG_INFO)
3461 fprintf (file, "%d ", reg_info[j].n_uses);
3462 if (flags & DF_RU_CHAIN)
3463 df_chain_dump (reg_info[j].uses, file);
3464 }
3465
3466 fprintf (file, "\n");
3467 }
3468 }
3469 }
3470 fprintf (file, "\n");
3471 }
3472
3473
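/* Dump the defs and uses recorded for INSN to FILE.  */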
3474 void
3475 df_insn_debug (df, insn, file)
3476 struct df *df;
3477 rtx insn;
3478 FILE *file;
3479 {
3480 unsigned int uid;
3481 int bbi;
3482
3483 uid = INSN_UID (insn);
3484 if (uid >= df->insn_size)
3485 return;
3486
3487 if (df->insns[uid].defs)
3488 bbi = DF_REF_BBNO (df->insns[uid].defs->ref);
3489 else if (df->insns[uid].uses)
3490 bbi = DF_REF_BBNO (df->insns[uid].uses->ref);
3491 else
3492 bbi = -1;
3493
3494 fprintf (file, "insn %d bb %d luid %d defs ",
3495 uid, bbi, DF_INSN_LUID (df, insn));
3496 df_chain_dump (df->insns[uid].defs, file);
3497 fprintf (file, " uses ");
3498 df_chain_dump (df->insns[uid].uses, file);
3499 fprintf (file, "\n");
3500 }
3501
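/* As df_insn_debug, but also print the regno of each ref.  */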
3502 void
3503 df_insn_debug_regno (df, insn, file)
3504 struct df *df;
3505 rtx insn;
3506 FILE *file;
3507 {
3508 unsigned int uid;
3509 int bbi;
3510
3511 uid = INSN_UID (insn);
3512 if (uid >= df->insn_size)
3513 return;
3514
3515 if (df->insns[uid].defs)
3516 bbi = DF_REF_BBNO (df->insns[uid].defs->ref);
3517 else if (df->insns[uid].uses)
3518 bbi = DF_REF_BBNO (df->insns[uid].uses->ref);
3519 else
3520 bbi = -1;
3521
3522 fprintf (file, "insn %d bb %d luid %d defs ",
3523 uid, bbi, DF_INSN_LUID (df, insn));
3524 df_chain_dump_regno (df->insns[uid].defs, file);
3525 fprintf (file, " uses ");
3526 df_chain_dump_regno (df->insns[uid].uses, file);
3527 fprintf (file, "\n");
3528 }
3529
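/* Dump the def and use chains recorded for register REGNO to FILE.  */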
3530 static void
3531 df_regno_debug (df, regno, file)
3532 struct df *df;
3533 unsigned int regno;
3534 FILE *file;
3535 {
3536 if (regno >= df->reg_size)
3537 return;
3538
3539 fprintf (file, "reg %d life %d defs ",
3540 regno, df->regs[regno].lifetime);
3541 df_chain_dump (df->regs[regno].defs, file);
3542 fprintf (file, " uses ");
3543 df_chain_dump (df->regs[regno].uses, file);
3544 fprintf (file, "\n");
3545 }
3546
3547
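/* Dump REF and its def-use or use-def chain to FILE.  */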
3548 static void
3549 df_ref_debug (df, ref, file)
3550 struct df *df;
3551 struct ref *ref;
3552 FILE *file;
3553 {
3554 fprintf (file, "%c%d ",
3555 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
3556 DF_REF_ID (ref));
3557 fprintf (file, "reg %d bb %d luid %d insn %d chain ",
3558 DF_REF_REGNO (ref),
3559 DF_REF_BBNO (ref),
3560 DF_INSN_LUID (df, DF_REF_INSN (ref)),
3561 INSN_UID (DF_REF_INSN (ref)));
3562 df_chain_dump (DF_REF_CHAIN (ref), file);
3563 fprintf (file, "\n");
3564 }
3565
3566
3567 void
3568 debug_df_insn (insn)
3569 rtx insn;
3570 {
3571 df_insn_debug (ddf, insn, stderr);
3572 debug_rtx (insn);
3573 }
3574
3575
3576 void
3577 debug_df_reg (reg)
3578 rtx reg;
3579 {
3580 df_regno_debug (ddf, REGNO (reg), stderr);
3581 }
3582
3583
3584 void
3585 debug_df_regno (regno)
3586 unsigned int regno;
3587 {
3588 df_regno_debug (ddf, regno, stderr);
3589 }
3590
3591
3592 void
3593 debug_df_ref (ref)
3594 struct ref *ref;
3595 {
3596 df_ref_debug (ddf, ref, stderr);
3597 }
3598
3599
3600 void
3601 debug_df_defno (defno)
3602 unsigned int defno;
3603 {
3604 df_ref_debug (ddf, ddf->defs[defno], stderr);
3605 }
3606
3607
3608 void
3609 debug_df_useno (useno)
3610 unsigned int useno;
3611 {
3612 df_ref_debug (ddf, ddf->uses[useno], stderr);
3613 }
3614
3615
3616 void
3617 debug_df_chain (link)
3618 struct df_link *link;
3619 {
3620 df_chain_dump (link, stderr);
3621 fputc ('\n', stderr);
3622 }
3623
3624 /* Hybrid search algorithm from "Implementation Techniques for
3625 Efficient Data-Flow Analysis of Large Programs". */
3626 static void
3627 hybrid_search_bitmap (block, in, out, gen, kill, dir,
3628 conf_op, transfun, visited, pending,
3629 data)
3630 basic_block block;
3631 bitmap *in, *out, *gen, *kill;
3632 enum df_flow_dir dir;
3633 enum df_confluence_op conf_op;
3634 transfer_function_bitmap transfun;
3635 sbitmap visited;
3636 sbitmap pending;
3637 void *data;
3638 {
3639 int changed;
3640 int i = block->index;
3641 edge e;
3642 basic_block bb = block;
3643 SET_BIT (visited, block->index);
3644 if (TEST_BIT (pending, block->index))
3645 {
3646 if (dir == FORWARD)
3647 {
3648 /* Calculate <conf_op> of predecessor_outs */
3649 bitmap_zero (in[i]);
3650 for (e = bb->pred; e != 0; e = e->pred_next)
3651 {
3652 if (e->src == ENTRY_BLOCK_PTR)
3653 continue;
3654 switch (conf_op)
3655 {
3656 case UNION:
3657 bitmap_a_or_b (in[i], in[i], out[e->src->index]);
3658 break;
3659 case INTERSECTION:
3660 bitmap_a_and_b (in[i], in[i], out[e->src->index]);
3661 break;
3662 }
3663 }
3664 }
3665 else
3666 {
3667 /* Calculate <conf_op> of successor ins */
3668 bitmap_zero (out[i]);
3669 for (e = bb->succ; e != 0; e = e->succ_next)
3670 {
3671 if (e->dest == EXIT_BLOCK_PTR)
3672 continue;
3673 switch (conf_op)
3674 {
3675 case UNION:
3676 bitmap_a_or_b (out[i], out[i], in[e->dest->index]);
3677 break;
3678 case INTERSECTION:
3679 bitmap_a_and_b (out[i], out[i], in[e->dest->index]);
3680 break;
3681 }
3682 }
3683 }
3684 /* Common part */
3685 (*transfun)(i, &changed, in[i], out[i], gen[i], kill[i], data);
3686 RESET_BIT (pending, i);
3687 if (changed)
3688 {
3689 if (dir == FORWARD)
3690 {
3691 for (e = bb->succ; e != 0; e = e->succ_next)
3692 {
3693 if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
3694 continue;
3695 SET_BIT (pending, e->dest->index);
3696 }
3697 }
3698 else
3699 {
3700 for (e = bb->pred; e != 0; e = e->pred_next)
3701 {
3702 if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
3703 continue;
3704 SET_BIT (pending, e->src->index);
3705 }
3706 }
3707 }
3708 }
3709 if (dir == FORWARD)
3710 {
3711 for (e = bb->succ; e != 0; e = e->succ_next)
3712 {
3713 if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
3714 continue;
3715 if (!TEST_BIT (visited, e->dest->index))
3716 hybrid_search_bitmap (e->dest, in, out, gen, kill, dir,
3717 conf_op, transfun, visited, pending,
3718 data);
3719 }
3720 }
3721 else
3722 {
3723 for (e = bb->pred; e != 0; e = e->pred_next)
3724 {
3725 if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
3726 continue;
3727 if (!TEST_BIT (visited, e->src->index))
3728 hybrid_search_bitmap (e->src, in, out, gen, kill, dir,
3729 conf_op, transfun, visited, pending,
3730 data);
3731 }
3732 }
3733 }
3734
3735
3736 /* Hybrid search for sbitmaps, rather than bitmaps. */
3737 static void
3738 hybrid_search_sbitmap (block, in, out, gen, kill, dir,
3739 conf_op, transfun, visited, pending,
3740 data)
3741 basic_block block;
3742 sbitmap *in, *out, *gen, *kill;
3743 enum df_flow_dir dir;
3744 enum df_confluence_op conf_op;
3745 transfer_function_sbitmap transfun;
3746 sbitmap visited;
3747 sbitmap pending;
3748 void *data;
3749 {
3750 int changed;
3751 int i = block->index;
3752 edge e;
3753 basic_block bb = block;
3754 SET_BIT (visited, block->index);
3755 if (TEST_BIT (pending, block->index))
3756 {
3757 if (dir == FORWARD)
3758 {
3759 /* Calculate <conf_op> of predecessor_outs */
3760 sbitmap_zero (in[i]);
3761 for (e = bb->pred; e != 0; e = e->pred_next)
3762 {
3763 if (e->src == ENTRY_BLOCK_PTR)
3764 continue;
3765 switch (conf_op)
3766 {
3767 case UNION:
3768 sbitmap_a_or_b (in[i], in[i], out[e->src->index]);
3769 break;
3770 case INTERSECTION:
3771 sbitmap_a_and_b (in[i], in[i], out[e->src->index]);
3772 break;
3773 }
3774 }
3775 }
3776 else
3777 {
3778 /* Calculate <conf_op> of successor ins */
3779 sbitmap_zero (out[i]);
3780 for (e = bb->succ; e != 0; e = e->succ_next)
3781 {
3782 if (e->dest == EXIT_BLOCK_PTR)
3783 continue;
3784 switch (conf_op)
3785 {
3786 case UNION:
3787 sbitmap_a_or_b (out[i], out[i], in[e->dest->index]);
3788 break;
3789 case INTERSECTION:
3790 sbitmap_a_and_b (out[i], out[i], in[e->dest->index]);
3791 break;
3792 }
3793 }
3794 }
3795 /* Common part */
3796 (*transfun)(i, &changed, in[i], out[i], gen[i], kill[i], data);
3797 RESET_BIT (pending, i);
3798 if (changed)
3799 {
3800 if (dir == FORWARD)
3801 {
3802 for (e = bb->succ; e != 0; e = e->succ_next)
3803 {
3804 if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
3805 continue;
3806 SET_BIT (pending, e->dest->index);
3807 }
3808 }
3809 else
3810 {
3811 for (e = bb->pred; e != 0; e = e->pred_next)
3812 {
3813 if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
3814 continue;
3815 SET_BIT (pending, e->src->index);
3816 }
3817 }
3818 }
3819 }
3820 if (dir == FORWARD)
3821 {
3822 for (e = bb->succ; e != 0; e = e->succ_next)
3823 {
3824 if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
3825 continue;
3826 if (!TEST_BIT (visited, e->dest->index))
3827 hybrid_search_sbitmap (e->dest, in, out, gen, kill, dir,
3828 conf_op, transfun, visited, pending,
3829 data);
3830 }
3831 }
3832 else
3833 {
3834 for (e = bb->pred; e != 0; e = e->pred_next)
3835 {
3836 if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
3837 continue;
3838 if (!TEST_BIT (visited, e->src->index))
3839 hybrid_search_sbitmap (e->src, in, out, gen, kill, dir,
3840 conf_op, transfun, visited, pending,
3841 data);
3842 }
3843 }
3844 }
3845
3846
3847
3848
3849 /* gen = GEN set.
3850 kill = KILL set.
3851 in, out = Filled in by function.
3852 blocks = Blocks to analyze.
3853 dir = Dataflow direction.
3854 conf_op = Confluence operation.
3855 transfun = Transfer function.
3856 order = Order to iterate in. (Should map block numbers -> order)
3857 data = Whatever you want. It's passed to the transfer function.
3858
3859 This function will perform iterative bitvector dataflow, producing
3860 the in and out sets. Even if you only want to perform it for a
3861 small number of blocks, the vectors for in and out must be large
3862 enough for *all* blocks, because changing one block might affect
3863 others. However, it'll only put what you say to analyze on the
3864 initial worklist.
3865
3866 For forward problems, you probably want to pass in a mapping of
3867 block number to rc_order (like df->inverse_rc_map).
3868 */
3869 void
3870 iterative_dataflow_sbitmap (in, out, gen, kill, blocks,
3871 dir, conf_op, transfun, order, data)
3872 sbitmap *in, *out, *gen, *kill;
3873 bitmap blocks;
3874 enum df_flow_dir dir;
3875 enum df_confluence_op conf_op;
3876 transfer_function_sbitmap transfun;
3877 int *order;
3878 void *data;
3879 {
3880 int i;
3881 fibheap_t worklist;
3882 basic_block bb;
3883 sbitmap visited, pending;
3884 pending = sbitmap_alloc (last_basic_block);
3885 visited = sbitmap_alloc (last_basic_block);
3886 sbitmap_zero (pending);
3887 sbitmap_zero (visited);
3888 worklist = fibheap_new ();
3889 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
3890 {
3891 fibheap_insert (worklist, order[i], (void *) (size_t) i);
3892 SET_BIT (pending, i);
3893 if (dir == FORWARD)
3894 sbitmap_copy (out[i], gen[i]);
3895 else
3896 sbitmap_copy (in[i], gen[i]);
3897 });
3898 while (sbitmap_first_set_bit (pending) != -1)
3899 {
3900 while (!fibheap_empty (worklist))
3901 {
3902 i = (size_t) fibheap_extract_min (worklist);
3903 bb = BASIC_BLOCK (i);
3904 if (!TEST_BIT (visited, bb->index))
3905 hybrid_search_sbitmap (bb, in, out, gen, kill, dir,
3906 conf_op, transfun, visited, pending, data);
3907 }
3908 if (sbitmap_first_set_bit (pending) != -1)
3909 {
3910 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
3911 {
3912 fibheap_insert (worklist, order[i], (void *) (size_t) i);
3913 });
3914 sbitmap_zero (visited);
3915 }
3916 else
3917 {
3918 break;
3919 }
3920 }
3921 sbitmap_free (pending);
3922 sbitmap_free (visited);
3923 fibheap_delete (worklist);
3924 }
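
/* A forward problem over the whole function might be set up along these
   lines (a sketch only; SIZE, the gen/kill computation and my_transfer
   are the caller's own):

	sbitmap *in = sbitmap_vector_alloc (last_basic_block, SIZE);
	sbitmap *out = sbitmap_vector_alloc (last_basic_block, SIZE);
	sbitmap *gen = sbitmap_vector_alloc (last_basic_block, SIZE);
	sbitmap *kill = sbitmap_vector_alloc (last_basic_block, SIZE);

	sbitmap_vector_zero (in, last_basic_block);
	sbitmap_vector_zero (out, last_basic_block);

	... fill gen[bb->index] and kill[bb->index] for each bb ...

	iterative_dataflow_sbitmap (in, out, gen, kill, df->all_blocks,
				    FORWARD, UNION, my_transfer,
				    df->inverse_rc_map, NULL);  */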
3925
3926 /* Exactly the same as iterative_dataflow_sbitmap, except it works on
3927 bitmaps instead.  */
3928 void
3929 iterative_dataflow_bitmap (in, out, gen, kill, blocks,
3930 dir, conf_op, transfun, order, data)
3931 bitmap *in, *out, *gen, *kill;
3932 bitmap blocks;
3933 enum df_flow_dir dir;
3934 enum df_confluence_op conf_op;
3935 transfer_function_bitmap transfun;
3936 int *order;
3937 void *data;
3938 {
3939 int i;
3940 fibheap_t worklist;
3941 basic_block bb;
3942 sbitmap visited, pending;
3943 pending = sbitmap_alloc (last_basic_block);
3944 visited = sbitmap_alloc (last_basic_block);
3945 sbitmap_zero (pending);
3946 sbitmap_zero (visited);
3947 worklist = fibheap_new ();
3948 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
3949 {
3950 fibheap_insert (worklist, order[i], (void *) (size_t) i);
3951 SET_BIT (pending, i);
3952 if (dir == FORWARD)
3953 bitmap_copy (out[i], gen[i]);
3954 else
3955 bitmap_copy (in[i], gen[i]);
3956 });
3957 while (sbitmap_first_set_bit (pending) != -1)
3958 {
3959 while (!fibheap_empty (worklist))
3960 {
3961 i = (size_t) fibheap_extract_min (worklist);
3962 bb = BASIC_BLOCK (i);
3963 if (!TEST_BIT (visited, bb->index))
3964 hybrid_search_bitmap (bb, in, out, gen, kill, dir,
3965 conf_op, transfun, visited, pending, data);
3966 }
3967 if (sbitmap_first_set_bit (pending) != -1)
3968 {
3969 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
3970 {
3971 fibheap_insert (worklist, order[i], (void *) (size_t) i);
3972 });
3973 sbitmap_zero (visited);
3974 }
3975 else
3976 {
3977 break;
3978 }
3979 }
3980 sbitmap_free (pending);
3981 sbitmap_free (visited);
3982 fibheap_delete (worklist);
3983 }