/* Dataflow support routines.
   Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
   Contributed by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz,
   mhayes@redhat.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.


OVERVIEW:

This file provides some dataflow routines for computing reaching defs,
upward exposed uses, live variables, def-use chains, and use-def
chains.  The global dataflow is performed using simple iterative
methods with a worklist and could be sped up by ordering the blocks
in depth-first search order.

A `struct ref' data structure (ref) is allocated for every register
reference (def or use) and this records the insn and bb the ref is
found within.  The refs are linked together in chains of uses and defs
for each insn and for each register.  Each ref also has a chain field
that links all the use refs for a def or all the def refs for a use.
This is used to create use-def or def-use chains.


USAGE:

Here's an example of using the dataflow routines.

      struct df *df;

      df = df_init ();

      df_analyse (df, 0, DF_ALL);

      df_dump (df, DF_ALL, stderr);

      df_finish (df);

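The second argument to df_analyse is a bitmap specifying which basic
blocks to analyse (zero, as here, means all of them) and the third is
a mask of DF_* flags selecting the problems to compute.
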

df_init simply creates a poor man's object (df) that needs to be
passed to all the dataflow routines.  df_finish destroys this
object and frees up any allocated memory.

df_analyse performs the following:

1. Records defs and uses by scanning the insns in each basic block
   or by scanning the insns queued by df_insn_modify.
2. Links defs and uses into insn-def and insn-use chains.
3. Links defs and uses into reg-def and reg-use chains.
4. Assigns LUIDs to each insn (for modified blocks).
5. Calculates local reaching definitions.
6. Calculates global reaching definitions.
7. Creates use-def chains.
8. Calculates local reaching uses (upwards exposed uses).
9. Calculates global reaching uses.
10. Creates def-use chains.
11. Calculates local live registers.
12. Calculates global live registers.
13. Calculates register lifetimes and determines local registers.


PHILOSOPHY:

Note that the dataflow information is not updated for every newly
deleted or created insn.  If the dataflow information requires
updating then all the changed, new, or deleted insns need to be
marked with df_insn_modify (or df_insns_modify) either directly or
indirectly (say through calling df_insn_delete).  df_insn_modify
marks all the modified insns to get processed the next time df_analyse
is called.

Beware that tinkering with insns may invalidate the dataflow information.
The philosophy behind these routines is that once the dataflow
information has been gathered, the user should store what they require
before they tinker with any insn.  Once a reg is replaced, for example,
then the reg-def/reg-use chains will point to the wrong place.  Once a
whole lot of changes have been made, df_analyse can be called again
to update the dataflow information.  Currently, this is not very smart
with regard to propagating changes to the dataflow so it should not
be called very often.

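For example, a typical update cycle looks something like this (a
sketch; PAT, AFTER, and BB stand for whatever the caller has to
hand):

      new_insn = emit_insn_after (pat, after);
      df_insn_modify (df, bb, new_insn);

      ... make and mark further changes, using df_insn_modify or
      df_insn_delete for each affected insn ...

      df_analyse (df, 0, DF_ALL);
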

DATA STRUCTURES:

The basic object is a REF (reference) and this may either be a DEF
(definition) or a USE of a register.

These are linked into a variety of lists; namely reg-def, reg-use,
insn-def, insn-use, def-use, and use-def lists.  For example,
the reg-def lists contain all the refs that define a given register
while the insn-use lists contain all the refs used by an insn.

Note that the reg-def and reg-use chains are generally short (except for the
hard registers) and thus it is much faster to search these chains
rather than searching the def or use bitmaps.

If the insns are in SSA form then the reg-def and use-def lists
should only contain the single defining ref.
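
For example, walking the reg-def chain of register REGNO (a sketch
using the lists described above):

      struct df_link *link;

      for (link = df->regs[regno].defs; link; link = link->next)
        {
          struct ref *def = link->ref;
          ... inspect DF_REF_INSN (def), DF_REF_REG (def), etc. ...
        }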

TODO:

1) Incremental dataflow analysis.

Note that if a loop invariant insn is hoisted (or sunk), we do not
need to change the def-use or use-def chains.  All we have to do is to
change the bb field for all the associated defs and uses and to
renumber the LUIDs for the original and new basic blocks of the insn.

When shadowing loop mems we create new uses and defs for new pseudos
so we do not affect the existing dataflow information.

My current strategy is to queue up all modified, created, or deleted
insns so when df_analyse is called we can easily determine all the new
or deleted refs.  Currently the global dataflow information is
recomputed from scratch but this could be propagated more efficiently.

2) Improved global data flow computation using depth first search.

3) Reduced memory requirements.

We could operate a pool of ref structures.  When a ref is deleted it
gets returned to the pool (say by linking on to a chain of free refs).
This will require a pair of bitmaps for defs and uses so that we can
tell which ones have been changed.  Alternatively, we could
periodically squeeze the def and use tables and associated bitmaps and
renumber the def and use ids.

4) Ordering of reg-def and reg-use lists.

Should the first entry in the def list be the first def (within a BB)?
Similarly, should the first entry in the use list be the last use
(within a BB)?

5) Working with a sub-CFG.

Often the whole CFG does not need to be analysed; for example,
when optimising a loop, only certain registers are of interest.
Perhaps there should be a bitmap argument to df_analyse to specify
which registers should be analysed?  */

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tm_p.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "regs.h"
#include "obstack.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "sbitmap.h"
#include "bitmap.h"
#include "df.h"
#include "fibheap.h"

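/* Execute CODE for each basic block BB whose index is set in BITMAP,
   scanning the bits upwards from MIN.  */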
#define FOR_EACH_BB_IN_BITMAP(BITMAP, MIN, BB, CODE)            \
do {                                                            \
  unsigned int node_;                                           \
  EXECUTE_IF_SET_IN_BITMAP (BITMAP, MIN, node_,                 \
  {(BB) = BASIC_BLOCK (node_); CODE;});} while (0)

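/* The obstack on which refs and def-use/use-def chain links are
   allocated.  */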
static struct obstack df_ref_obstack;
static struct df *ddf;

static void df_reg_table_realloc PARAMS((struct df *, int));
#if 0
static void df_def_table_realloc PARAMS((struct df *, int));
#endif
static void df_insn_table_realloc PARAMS((struct df *, unsigned int));
static void df_bitmaps_alloc PARAMS((struct df *, int));
static void df_bitmaps_free PARAMS((struct df *, int));
static void df_free PARAMS((struct df *));
static void df_alloc PARAMS((struct df *, int));

static rtx df_reg_clobber_gen PARAMS((unsigned int));
static rtx df_reg_use_gen PARAMS((unsigned int));

static inline struct df_link *df_link_create PARAMS((struct ref *,
                                                     struct df_link *));
static struct df_link *df_ref_unlink PARAMS((struct df_link **, struct ref *));
static void df_def_unlink PARAMS((struct df *, struct ref *));
static void df_use_unlink PARAMS((struct df *, struct ref *));
static void df_insn_refs_unlink PARAMS ((struct df *, basic_block, rtx));
#if 0
static void df_bb_refs_unlink PARAMS ((struct df *, basic_block));
static void df_refs_unlink PARAMS ((struct df *, bitmap));
#endif

static struct ref *df_ref_create PARAMS((struct df *,
                                         rtx, rtx *, rtx,
                                         enum df_ref_type, enum df_ref_flags));
static void df_ref_record_1 PARAMS((struct df *, rtx, rtx *,
                                    rtx, enum df_ref_type,
                                    enum df_ref_flags));
static void df_ref_record PARAMS((struct df *, rtx, rtx *,
                                  rtx, enum df_ref_type,
                                  enum df_ref_flags));
static void df_def_record_1 PARAMS((struct df *, rtx, basic_block, rtx));
static void df_defs_record PARAMS((struct df *, rtx, basic_block, rtx));
static void df_uses_record PARAMS((struct df *, rtx *,
                                   enum df_ref_type, basic_block, rtx,
                                   enum df_ref_flags));
static void df_insn_refs_record PARAMS((struct df *, basic_block, rtx));
static void df_bb_refs_record PARAMS((struct df *, basic_block));
static void df_refs_record PARAMS((struct df *, bitmap));

static void df_bb_reg_def_chain_create PARAMS((struct df *, basic_block));
static void df_reg_def_chain_create PARAMS((struct df *, bitmap));
static void df_bb_reg_use_chain_create PARAMS((struct df *, basic_block));
static void df_reg_use_chain_create PARAMS((struct df *, bitmap));
static void df_bb_du_chain_create PARAMS((struct df *, basic_block, bitmap));
static void df_du_chain_create PARAMS((struct df *, bitmap));
static void df_bb_ud_chain_create PARAMS((struct df *, basic_block));
static void df_ud_chain_create PARAMS((struct df *, bitmap));
static void df_bb_rd_local_compute PARAMS((struct df *, basic_block));
static void df_rd_local_compute PARAMS((struct df *, bitmap));
static void df_bb_ru_local_compute PARAMS((struct df *, basic_block));
static void df_ru_local_compute PARAMS((struct df *, bitmap));
static void df_bb_lr_local_compute PARAMS((struct df *, basic_block));
static void df_lr_local_compute PARAMS((struct df *, bitmap));
static void df_bb_reg_info_compute PARAMS((struct df *, basic_block, bitmap));
static void df_reg_info_compute PARAMS((struct df *, bitmap));

static int df_bb_luids_set PARAMS((struct df *df, basic_block));
static int df_luids_set PARAMS((struct df *df, bitmap));

static int df_modified_p PARAMS ((struct df *, bitmap));
static int df_refs_queue PARAMS ((struct df *));
static int df_refs_process PARAMS ((struct df *));
static int df_bb_refs_update PARAMS ((struct df *, basic_block));
static int df_refs_update PARAMS ((struct df *));
static void df_analyse_1 PARAMS((struct df *, bitmap, int, int));

static void df_insns_modify PARAMS((struct df *, basic_block,
                                    rtx, rtx));
static int df_rtx_mem_replace PARAMS ((rtx *, void *));
static int df_rtx_reg_replace PARAMS ((rtx *, void *));
void df_refs_reg_replace PARAMS ((struct df *, bitmap,
                                  struct df_link *, rtx, rtx));

static int df_def_dominates_all_uses_p PARAMS((struct df *, struct ref *def));
static int df_def_dominates_uses_p PARAMS((struct df *,
                                           struct ref *def, bitmap));
static struct ref *df_bb_regno_last_use_find PARAMS((struct df *, basic_block,
                                                     unsigned int));
static struct ref *df_bb_regno_first_def_find PARAMS((struct df *, basic_block,
                                                      unsigned int));
static struct ref *df_bb_insn_regno_last_use_find PARAMS((struct df *,
                                                          basic_block,
                                                          rtx, unsigned int));
static struct ref *df_bb_insn_regno_first_def_find PARAMS((struct df *,
                                                           basic_block,
                                                           rtx, unsigned int));

static void df_chain_dump PARAMS((struct df_link *, FILE *file));
static void df_chain_dump_regno PARAMS((struct df_link *, FILE *file));
static void df_regno_debug PARAMS ((struct df *, unsigned int, FILE *));
static void df_ref_debug PARAMS ((struct df *, struct ref *, FILE *));
static void df_rd_transfer_function PARAMS ((int, int *, bitmap, bitmap,
                                             bitmap, bitmap, void *));
static void df_ru_transfer_function PARAMS ((int, int *, bitmap, bitmap,
                                             bitmap, bitmap, void *));
static void df_lr_transfer_function PARAMS ((int, int *, bitmap, bitmap,
                                             bitmap, bitmap, void *));
static void hybrid_search_bitmap PARAMS ((basic_block, bitmap *, bitmap *,
                                          bitmap *, bitmap *, enum df_flow_dir,
                                          enum df_confluence_op,
                                          transfer_function_bitmap,
                                          sbitmap, sbitmap, void *));
static void hybrid_search_sbitmap PARAMS ((basic_block, sbitmap *, sbitmap *,
                                           sbitmap *, sbitmap *, enum df_flow_dir,
                                           enum df_confluence_op,
                                           transfer_function_sbitmap,
                                           sbitmap, sbitmap, void *));
static inline bool read_modify_subreg_p PARAMS ((rtx));

\f
/* Local memory allocation/deallocation routines.  */


/* Increase the insn info table to have space for at least SIZE + 1
   elements.  */
static void
df_insn_table_realloc (df, size)
     struct df *df;
     unsigned int size;
{
  size++;
  if (size <= df->insn_size)
    return;

  /* Make the table a little larger than requested, so we don't need
     to enlarge it so often.  */
  size += df->insn_size / 4;

  df->insns = (struct insn_info *)
    xrealloc (df->insns, size * sizeof (struct insn_info));

  memset (df->insns + df->insn_size, 0,
          (size - df->insn_size) * sizeof (struct insn_info));

  df->insn_size = size;

  if (! df->insns_modified)
    {
      df->insns_modified = BITMAP_XMALLOC ();
      bitmap_zero (df->insns_modified);
    }
}


/* Increase the reg info table by SIZE more elements.  */
static void
df_reg_table_realloc (df, size)
     struct df *df;
     int size;
{
  /* Make table 25 percent larger by default.  */
  if (! size)
    size = df->reg_size / 4;

  size += df->reg_size;
  if (size < max_reg_num ())
    size = max_reg_num ();

  df->regs = (struct reg_info *)
    xrealloc (df->regs, size * sizeof (struct reg_info));

  /* Zero the new entries.  */
  memset (df->regs + df->reg_size, 0,
          (size - df->reg_size) * sizeof (struct reg_info));

  df->reg_size = size;
}


#if 0
/* Not currently used.  */
static void
df_def_table_realloc (df, size)
     struct df *df;
     int size;
{
  int i;
  struct ref *refs;

  /* Make table 25 percent larger by default.  */
  if (! size)
    size = df->def_size / 4;

  df->def_size += size;
  df->defs = xrealloc (df->defs,
                       df->def_size * sizeof (*df->defs));

  /* Allocate a new block of memory and link into list of blocks
     that will need to be freed later.  */

  refs = xmalloc (size * sizeof (*refs));

  /* Link all the new refs together, overloading the chain field.  */
  for (i = 0; i < size - 1; i++)
    refs[i].chain = (struct df_link *) (refs + i + 1);
  refs[size - 1].chain = 0;
}
#endif


/* Allocate bitmaps for each basic block.  */
static void
df_bitmaps_alloc (df, flags)
     struct df *df;
     int flags;
{
  int dflags = 0;
  basic_block bb;

  /* Free the bitmaps if they need resizing.  */
  if ((flags & DF_LR) && df->n_regs < (unsigned int) max_reg_num ())
    dflags |= DF_LR | DF_RU;
  if ((flags & DF_RU) && df->n_uses < df->use_id)
    dflags |= DF_RU;
  if ((flags & DF_RD) && df->n_defs < df->def_id)
    dflags |= DF_RD;

  if (dflags)
    df_bitmaps_free (df, dflags);

  df->n_defs = df->def_id;
  df->n_uses = df->use_id;

  FOR_EACH_BB (bb)
    {
      struct bb_info *bb_info = DF_BB_INFO (df, bb);

      if (flags & DF_RD && ! bb_info->rd_in)
        {
          /* Allocate bitmaps for reaching definitions.  */
          bb_info->rd_kill = BITMAP_XMALLOC ();
          bitmap_zero (bb_info->rd_kill);
          bb_info->rd_gen = BITMAP_XMALLOC ();
          bitmap_zero (bb_info->rd_gen);
          bb_info->rd_in = BITMAP_XMALLOC ();
          bb_info->rd_out = BITMAP_XMALLOC ();
          bb_info->rd_valid = 0;
        }

      if (flags & DF_RU && ! bb_info->ru_in)
        {
          /* Allocate bitmaps for upward exposed uses.  */
          bb_info->ru_kill = BITMAP_XMALLOC ();
          bitmap_zero (bb_info->ru_kill);
          /* Note the lack of symmetry.  */
          bb_info->ru_gen = BITMAP_XMALLOC ();
          bitmap_zero (bb_info->ru_gen);
          bb_info->ru_in = BITMAP_XMALLOC ();
          bb_info->ru_out = BITMAP_XMALLOC ();
          bb_info->ru_valid = 0;
        }

      if (flags & DF_LR && ! bb_info->lr_in)
        {
          /* Allocate bitmaps for live variables.  */
          bb_info->lr_def = BITMAP_XMALLOC ();
          bitmap_zero (bb_info->lr_def);
          bb_info->lr_use = BITMAP_XMALLOC ();
          bitmap_zero (bb_info->lr_use);
          bb_info->lr_in = BITMAP_XMALLOC ();
          bb_info->lr_out = BITMAP_XMALLOC ();
          bb_info->lr_valid = 0;
        }
    }
}


/* Free bitmaps for each basic block.  */
static void
df_bitmaps_free (df, flags)
     struct df *df ATTRIBUTE_UNUSED;
     int flags;
{
  basic_block bb;

  FOR_EACH_BB (bb)
    {
      struct bb_info *bb_info = DF_BB_INFO (df, bb);

      if (!bb_info)
        continue;

      if ((flags & DF_RD) && bb_info->rd_in)
        {
          /* Free bitmaps for reaching definitions.  */
          BITMAP_XFREE (bb_info->rd_kill);
          bb_info->rd_kill = NULL;
          BITMAP_XFREE (bb_info->rd_gen);
          bb_info->rd_gen = NULL;
          BITMAP_XFREE (bb_info->rd_in);
          bb_info->rd_in = NULL;
          BITMAP_XFREE (bb_info->rd_out);
          bb_info->rd_out = NULL;
        }

      if ((flags & DF_RU) && bb_info->ru_in)
        {
          /* Free bitmaps for upward exposed uses.  */
          BITMAP_XFREE (bb_info->ru_kill);
          bb_info->ru_kill = NULL;
          BITMAP_XFREE (bb_info->ru_gen);
          bb_info->ru_gen = NULL;
          BITMAP_XFREE (bb_info->ru_in);
          bb_info->ru_in = NULL;
          BITMAP_XFREE (bb_info->ru_out);
          bb_info->ru_out = NULL;
        }

      if ((flags & DF_LR) && bb_info->lr_in)
        {
          /* Free bitmaps for live variables.  */
          BITMAP_XFREE (bb_info->lr_def);
          bb_info->lr_def = NULL;
          BITMAP_XFREE (bb_info->lr_use);
          bb_info->lr_use = NULL;
          BITMAP_XFREE (bb_info->lr_in);
          bb_info->lr_in = NULL;
          BITMAP_XFREE (bb_info->lr_out);
          bb_info->lr_out = NULL;
        }
    }
  df->flags &= ~(flags & (DF_RD | DF_RU | DF_LR));
}


/* Allocate and initialize dataflow memory.  */
static void
df_alloc (df, n_regs)
     struct df *df;
     int n_regs;
{
  int n_insns;
  basic_block bb;

  gcc_obstack_init (&df_ref_obstack);

  /* Perhaps we should use LUIDs to save memory for the insn_refs
     table.  This is only a small saving; a few pointers.  */
  n_insns = get_max_uid () + 1;

  df->def_id = 0;
  df->n_defs = 0;
  /* Approximate number of defs by number of insns.  */
  df->def_size = n_insns;
  df->defs = xmalloc (df->def_size * sizeof (*df->defs));

  df->use_id = 0;
  df->n_uses = 0;
  /* Approximate number of uses by twice number of insns.  */
  df->use_size = n_insns * 2;
  df->uses = xmalloc (df->use_size * sizeof (*df->uses));

  df->n_regs = n_regs;
  df->n_bbs = last_basic_block;

  /* Allocate temporary working array used during local dataflow analysis.  */
  df->reg_def_last = xmalloc (df->n_regs * sizeof (struct ref *));

  df_insn_table_realloc (df, n_insns);

  df_reg_table_realloc (df, df->n_regs);

  df->bbs_modified = BITMAP_XMALLOC ();
  bitmap_zero (df->bbs_modified);

  df->flags = 0;

  df->bbs = xcalloc (last_basic_block, sizeof (struct bb_info));

  df->all_blocks = BITMAP_XMALLOC ();
  FOR_EACH_BB (bb)
    bitmap_set_bit (df->all_blocks, bb->index);
}


/* Free all the dataflow info.  */
static void
df_free (df)
     struct df *df;
{
  df_bitmaps_free (df, DF_ALL);

  if (df->bbs)
    free (df->bbs);
  df->bbs = 0;

  if (df->insns)
    free (df->insns);
  df->insns = 0;
  df->insn_size = 0;

  if (df->defs)
    free (df->defs);
  df->defs = 0;
  df->def_size = 0;
  df->def_id = 0;

  if (df->uses)
    free (df->uses);
  df->uses = 0;
  df->use_size = 0;
  df->use_id = 0;

  if (df->regs)
    free (df->regs);
  df->regs = 0;
  df->reg_size = 0;

  if (df->bbs_modified)
    BITMAP_XFREE (df->bbs_modified);
  df->bbs_modified = 0;

  if (df->insns_modified)
    BITMAP_XFREE (df->insns_modified);
  df->insns_modified = 0;

  BITMAP_XFREE (df->all_blocks);
  df->all_blocks = 0;

  obstack_free (&df_ref_obstack, NULL);
}
\f
/* Local miscellaneous routines.  */

/* Return a USE for register REGNO.  */
static rtx
df_reg_use_gen (regno)
     unsigned int regno;
{
  rtx reg;
  rtx use;

  reg = regno_reg_rtx[regno];

  use = gen_rtx_USE (GET_MODE (reg), reg);
  return use;
}


/* Return a CLOBBER for register REGNO.  */
static rtx
df_reg_clobber_gen (regno)
     unsigned int regno;
{
  rtx reg;
  rtx clobber;

  reg = regno_reg_rtx[regno];

  clobber = gen_rtx_CLOBBER (GET_MODE (reg), reg);
  return clobber;
}
\f
/* Local chain manipulation routines.  */

/* Create a link in a def-use or use-def chain.  */
static inline struct df_link *
df_link_create (ref, next)
     struct ref *ref;
     struct df_link *next;
{
  struct df_link *link;

  link = (struct df_link *) obstack_alloc (&df_ref_obstack,
                                           sizeof (*link));
  link->next = next;
  link->ref = ref;
  return link;
}


/* Remove REF from the chain pointed to by PHEAD.  */
static struct df_link *
df_ref_unlink (phead, ref)
     struct df_link **phead;
     struct ref *ref;
{
  struct df_link *link = *phead;

  if (link)
    {
      if (! link->next)
        {
          /* Only a single ref.  It must be the one we want.
             If not, the def-use and use-def chains are likely to
             be inconsistent.  */
          if (link->ref != ref)
            abort ();
          /* Now have an empty chain.  */
          *phead = NULL;
        }
      else
        {
          /* Multiple refs.  One of them must be us.  */
          if (link->ref == ref)
            *phead = link->next;
          else
            {
              /* Follow chain.  */
              for (; link->next; link = link->next)
                {
                  if (link->next->ref == ref)
                    {
                      /* Unlink from list.  */
                      link->next = link->next->next;
                      return link->next;
                    }
                }
            }
        }
    }
  return link;
}


/* Unlink REF from all def-use/use-def chains, etc.  */
int
df_ref_remove (df, ref)
     struct df *df;
     struct ref *ref;
{
  if (DF_REF_REG_DEF_P (ref))
    {
      df_def_unlink (df, ref);
      df_ref_unlink (&df->insns[DF_REF_INSN_UID (ref)].defs, ref);
    }
  else
    {
      df_use_unlink (df, ref);
      df_ref_unlink (&df->insns[DF_REF_INSN_UID (ref)].uses, ref);
    }
  return 1;
}


/* Unlink DEF from use-def and reg-def chains.  */
static void
df_def_unlink (df, def)
     struct df *df ATTRIBUTE_UNUSED;
     struct ref *def;
{
  struct df_link *du_link;
  unsigned int dregno = DF_REF_REGNO (def);

  /* Follow def-use chain to find all the uses of this def.  */
  for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next)
    {
      struct ref *use = du_link->ref;

      /* Unlink this def from the use-def chain.  */
      df_ref_unlink (&DF_REF_CHAIN (use), def);
    }
  DF_REF_CHAIN (def) = 0;

  /* Unlink def from reg-def chain.  */
  df_ref_unlink (&df->regs[dregno].defs, def);

  df->defs[DF_REF_ID (def)] = 0;
}


/* Unlink use from def-use and reg-use chains.  */
static void
df_use_unlink (df, use)
     struct df *df ATTRIBUTE_UNUSED;
     struct ref *use;
{
  struct df_link *ud_link;
  unsigned int uregno = DF_REF_REGNO (use);

  /* Follow use-def chain to find all the defs of this use.  */
  for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next)
    {
      struct ref *def = ud_link->ref;

      /* Unlink this use from the def-use chain.  */
      df_ref_unlink (&DF_REF_CHAIN (def), use);
    }
  DF_REF_CHAIN (use) = 0;

  /* Unlink use from reg-use chain.  */
  df_ref_unlink (&df->regs[uregno].uses, use);

  df->uses[DF_REF_ID (use)] = 0;
}
\f
/* Local routines for recording refs.  */


/* Create a new ref of type DF_REF_TYPE for register REG at address
   LOC within INSN of BB.  */
static struct ref *
df_ref_create (df, reg, loc, insn, ref_type, ref_flags)
     struct df *df;
     rtx reg;
     rtx *loc;
     rtx insn;
     enum df_ref_type ref_type;
     enum df_ref_flags ref_flags;
{
  struct ref *this_ref;
  unsigned int uid;

  this_ref = (struct ref *) obstack_alloc (&df_ref_obstack,
                                           sizeof (*this_ref));
  DF_REF_REG (this_ref) = reg;
  DF_REF_LOC (this_ref) = loc;
  DF_REF_INSN (this_ref) = insn;
  DF_REF_CHAIN (this_ref) = 0;
  DF_REF_TYPE (this_ref) = ref_type;
  DF_REF_FLAGS (this_ref) = ref_flags;
  uid = INSN_UID (insn);

  if (ref_type == DF_REF_REG_DEF)
    {
      if (df->def_id >= df->def_size)
        {
          /* Make table 25 percent larger.  */
          df->def_size += (df->def_size / 4);
          df->defs = xrealloc (df->defs,
                               df->def_size * sizeof (*df->defs));
        }
      DF_REF_ID (this_ref) = df->def_id;
      df->defs[df->def_id++] = this_ref;
    }
  else
    {
      if (df->use_id >= df->use_size)
        {
          /* Make table 25 percent larger.  */
          df->use_size += (df->use_size / 4);
          df->uses = xrealloc (df->uses,
                               df->use_size * sizeof (*df->uses));
        }
      DF_REF_ID (this_ref) = df->use_id;
      df->uses[df->use_id++] = this_ref;
    }
  return this_ref;
}


/* Create a new reference of type DF_REF_TYPE for a single register REG,
   used inside the LOC rtx of INSN.  */
static void
df_ref_record_1 (df, reg, loc, insn, ref_type, ref_flags)
     struct df *df;
     rtx reg;
     rtx *loc;
     rtx insn;
     enum df_ref_type ref_type;
     enum df_ref_flags ref_flags;
{
  df_ref_create (df, reg, loc, insn, ref_type, ref_flags);
}


/* Create new references of type DF_REF_TYPE for each part of register REG
   at address LOC within INSN of BB.  */
static void
df_ref_record (df, reg, loc, insn, ref_type, ref_flags)
     struct df *df;
     rtx reg;
     rtx *loc;
     rtx insn;
     enum df_ref_type ref_type;
     enum df_ref_flags ref_flags;
{
  unsigned int regno;

  if (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG)
    abort ();

  /* For the reg allocator we are interested in some SUBREG rtx's, but not
     all.  Notably only those representing a word extraction from a multi-word
     reg.  As written in the documentation those should have the form
     (subreg:SI (reg:M A) N), with size (SImode) < size (Mmode).
     XXX Is that true?  We could also use the global word_mode variable.  */
  if (GET_CODE (reg) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (reg)) < GET_MODE_SIZE (word_mode)
          || GET_MODE_SIZE (GET_MODE (reg))
             >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (reg)))))
    {
      loc = &SUBREG_REG (reg);
      reg = *loc;
    }

  regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int i;
      int endregno;

      if (! (df->flags & DF_HARD_REGS))
        return;

      /* GET_MODE (reg) is correct here.  We don't want to go into a SUBREG
         for the mode, because we only want to add references to regs, which
         are really referenced.  E.g., a (subreg:SI (reg:DI 0) 0) does _not_
         reference the whole reg 0 in DI mode (which would also include
         reg 1, at least, if 0 and 1 are SImode registers).  */
      endregno = HARD_REGNO_NREGS (regno, GET_MODE (reg));
      if (GET_CODE (reg) == SUBREG)
        regno += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)),
                                      SUBREG_BYTE (reg), GET_MODE (reg));
      endregno += regno;

      for (i = regno; i < endregno; i++)
        df_ref_record_1 (df, regno_reg_rtx[i],
                         loc, insn, ref_type, ref_flags);
    }
  else
    {
      df_ref_record_1 (df, reg, loc, insn, ref_type, ref_flags);
    }
}

/* Writes to paradoxical subregs, or to subregs which are too narrow,
   are read-modify-write.  */
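
/* For illustration, assume a target whose word size is 4 bytes
   (UNITS_PER_WORD == 4): a write to (subreg:DI (reg:SI 100) 0) is
   paradoxical and hence read-modify-write; a write to
   (subreg:HI (reg:DI 100) 0) changes only part of a word of reg 100
   and is also read-modify-write; but a write to
   (subreg:SI (reg:DI 100) 0) replaces one full word and is not.  */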

static inline bool
read_modify_subreg_p (x)
     rtx x;
{
  unsigned int isize, osize;
  if (GET_CODE (x) != SUBREG)
    return false;
  isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
  osize = GET_MODE_SIZE (GET_MODE (x));
  if (isize <= osize)
    return true;
  if (isize <= UNITS_PER_WORD)
    return false;
  if (osize >= UNITS_PER_WORD)
    return false;
  return true;
}


/* Process all the registers defined in the rtx, X.  */
static void
df_def_record_1 (df, x, bb, insn)
     struct df *df;
     rtx x;
     basic_block bb;
     rtx insn;
{
  rtx *loc = &SET_DEST (x);
  rtx dst = *loc;
  enum df_ref_flags flags = 0;

  /* Some targets place small structures in registers for
     return values of functions.  */
  if (GET_CODE (dst) == PARALLEL && GET_MODE (dst) == BLKmode)
    {
      int i;

      for (i = XVECLEN (dst, 0) - 1; i >= 0; i--)
        df_def_record_1 (df, XVECEXP (dst, 0, i), bb, insn);
      return;
    }

#ifdef CLASS_CANNOT_CHANGE_MODE
  if (GET_CODE (dst) == SUBREG
      && CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst),
                                     GET_MODE (SUBREG_REG (dst))))
    flags |= DF_REF_MODE_CHANGE;
#endif

  /* Maybe we should flag the use of STRICT_LOW_PART somehow.  It might
     be handy for the reg allocator.  */
  while (GET_CODE (dst) == STRICT_LOW_PART
         || GET_CODE (dst) == ZERO_EXTRACT
         || GET_CODE (dst) == SIGN_EXTRACT
         || read_modify_subreg_p (dst))
    {
      /* A strict_low_part always contains a SUBREG, but we don't want
         it to appear outside, as the whole register is always
         considered.  */
      if (GET_CODE (dst) == STRICT_LOW_PART)
        {
          loc = &XEXP (dst, 0);
          dst = *loc;
        }
#ifdef CLASS_CANNOT_CHANGE_MODE
      if (GET_CODE (dst) == SUBREG
          && CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst),
                                         GET_MODE (SUBREG_REG (dst))))
        flags |= DF_REF_MODE_CHANGE;
#endif
      loc = &XEXP (dst, 0);
      dst = *loc;
      flags |= DF_REF_READ_WRITE;
    }

  if (GET_CODE (dst) == REG
      || (GET_CODE (dst) == SUBREG && GET_CODE (SUBREG_REG (dst)) == REG))
    df_ref_record (df, dst, loc, insn, DF_REF_REG_DEF, flags);
}


/* Process all the registers defined in the pattern rtx, X.  */
static void
df_defs_record (df, x, bb, insn)
     struct df *df;
     rtx x;
     basic_block bb;
     rtx insn;
{
  RTX_CODE code = GET_CODE (x);

  if (code == SET || code == CLOBBER)
    {
      /* Mark the single def within the pattern.  */
      df_def_record_1 (df, x, bb, insn);
    }
  else if (code == PARALLEL)
    {
      int i;

      /* Mark the multiple defs within the pattern.  */
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        {
          code = GET_CODE (XVECEXP (x, 0, i));
          if (code == SET || code == CLOBBER)
            df_def_record_1 (df, XVECEXP (x, 0, i), bb, insn);
        }
    }
}


/* Process all the registers used in the rtx at address LOC.  */
static void
df_uses_record (df, loc, ref_type, bb, insn, flags)
     struct df *df;
     rtx *loc;
     enum df_ref_type ref_type;
     basic_block bb;
     rtx insn;
     enum df_ref_flags flags;
{
  RTX_CODE code;
  rtx x;
 retry:
  x = *loc;
  if (!x)
    return;
  code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_INT:
    case CONST:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any registers inside the address
         as being used.  */
      if (GET_CODE (XEXP (x, 0)) == MEM)
        df_uses_record (df, &XEXP (XEXP (x, 0), 0),
                        DF_REF_REG_MEM_STORE, bb, insn, flags);

      /* If we're clobbering a REG then we have a def so ignore.  */
      return;

    case MEM:
      df_uses_record (df, &XEXP (x, 0), DF_REF_REG_MEM_LOAD, bb, insn, flags);
      return;

    case SUBREG:
      /* While we're here, optimize this case.  */

      /* In case the SUBREG is not of a register, don't optimize.  */
      if (GET_CODE (SUBREG_REG (x)) != REG)
        {
          loc = &SUBREG_REG (x);
          df_uses_record (df, loc, ref_type, bb, insn, flags);
          return;
        }
#ifdef CLASS_CANNOT_CHANGE_MODE
      if (CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (x),
                                      GET_MODE (SUBREG_REG (x))))
        flags |= DF_REF_MODE_CHANGE;
#endif

      /* ... Fall through ...  */

    case REG:
      /* A register (or subreg) used other than being set.  */
      df_ref_record (df, x, loc, insn, ref_type, flags);
      return;

    case SET:
      {
        rtx dst = SET_DEST (x);

        df_uses_record (df, &SET_SRC (x), DF_REF_REG_USE, bb, insn, 0);

        switch (GET_CODE (dst))
          {
            enum df_ref_flags use_flags;
          case SUBREG:
            if (read_modify_subreg_p (dst))
              {
                use_flags = DF_REF_READ_WRITE;
#ifdef CLASS_CANNOT_CHANGE_MODE
                if (CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst),
                                                GET_MODE (SUBREG_REG (dst))))
                  use_flags |= DF_REF_MODE_CHANGE;
#endif
                df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb,
                                insn, use_flags);
                break;
              }
            /* ... FALLTHRU ...  */
          case REG:
          case PC:
          case PARALLEL:
            break;
          case MEM:
            df_uses_record (df, &XEXP (dst, 0),
                            DF_REF_REG_MEM_STORE,
                            bb, insn, 0);
            break;
          case STRICT_LOW_PART:
            /* A strict_low_part uses the whole reg, not only the subreg.  */
            dst = XEXP (dst, 0);
            if (GET_CODE (dst) != SUBREG)
              abort ();
            use_flags = DF_REF_READ_WRITE;
#ifdef CLASS_CANNOT_CHANGE_MODE
            if (CLASS_CANNOT_CHANGE_MODE_P (GET_MODE (dst),
                                            GET_MODE (SUBREG_REG (dst))))
              use_flags |= DF_REF_MODE_CHANGE;
#endif
            df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb,
                            insn, use_flags);
            break;
          case ZERO_EXTRACT:
          case SIGN_EXTRACT:
            df_uses_record (df, &XEXP (dst, 0), DF_REF_REG_USE, bb, insn,
                            DF_REF_READ_WRITE);
            df_uses_record (df, &XEXP (dst, 1), DF_REF_REG_USE, bb, insn, 0);
            df_uses_record (df, &XEXP (dst, 2), DF_REF_REG_USE, bb, insn, 0);
            dst = XEXP (dst, 0);
            break;
          default:
            abort ();
          }
        return;
      }

    case RETURN:
      break;

    case ASM_OPERANDS:
    case UNSPEC_VOLATILE:
    case TRAP_IF:
    case ASM_INPUT:
      {
        /* Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers, all pseudo-registers and all of
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu rounding
           mode.  An insn should not be moved across this even if it only uses
           pseudo-regs because it might give an incorrectly rounded result.

           For now, just mark any regs we can find in ASM_OPERANDS as
           used.  */

        /* For all ASM_OPERANDS, we must traverse the vector of input
           operands.  We cannot just fall through here since then we
           would be confused by the ASM_INPUT rtx inside ASM_OPERANDS,
           which does not indicate traditional asms unlike its normal
           usage.  */
        if (code == ASM_OPERANDS)
          {
            int j;

            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
              df_uses_record (df, &ASM_OPERANDS_INPUT (x, j),
                              DF_REF_REG_USE, bb, insn, 0);
            return;
          }
        break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* Catch the def of the register being modified.  */
      df_ref_record (df, XEXP (x, 0), &XEXP (x, 0), insn,
                     DF_REF_REG_DEF, DF_REF_READ_WRITE);

      /* ... Fall through to handle uses ...  */

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */
  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            /* Tail recursive case: save a function call level.  */
            if (i == 0)
              {
                loc = &XEXP (x, 0);
                goto retry;
              }
            df_uses_record (df, &XEXP (x, i), ref_type, bb, insn, flags);
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              df_uses_record (df, &XVECEXP (x, i, j), ref_type,
                              bb, insn, flags);
          }
      }
  }
}


/* Record all the refs within INSN of basic block BB.  */
static void
df_insn_refs_record (df, bb, insn)
     struct df *df;
     basic_block bb;
     rtx insn;
{
  int i;

  if (INSN_P (insn))
    {
      rtx note;

      /* Record register defs.  */
      df_defs_record (df, PATTERN (insn), bb, insn);

      if (df->flags & DF_EQUIV_NOTES)
        for (note = REG_NOTES (insn); note;
             note = XEXP (note, 1))
          {
            switch (REG_NOTE_KIND (note))
              {
              case REG_EQUIV:
              case REG_EQUAL:
                df_uses_record (df, &XEXP (note, 0), DF_REF_REG_USE,
                                bb, insn, 0);
              default:
                break;
              }
          }

      if (GET_CODE (insn) == CALL_INSN)
        {
          rtx note;
          rtx x;

          /* Record the registers used to pass arguments.  */
          for (note = CALL_INSN_FUNCTION_USAGE (insn); note;
               note = XEXP (note, 1))
            {
              if (GET_CODE (XEXP (note, 0)) == USE)
                df_uses_record (df, &XEXP (XEXP (note, 0), 0), DF_REF_REG_USE,
                                bb, insn, 0);
            }

          /* The stack ptr is used (honorarily) by a CALL insn.  */
          x = df_reg_use_gen (STACK_POINTER_REGNUM);
          df_uses_record (df, &XEXP (x, 0), DF_REF_REG_USE, bb, insn, 0);

          if (df->flags & DF_HARD_REGS)
            {
              /* Calls may also reference any of the global registers,
                 so they are recorded as used.  */
              for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
                if (global_regs[i])
                  {
                    x = df_reg_use_gen (i);
                    df_uses_record (df, &SET_DEST (x),
                                    DF_REF_REG_USE, bb, insn, 0);
                  }
            }
        }

      /* Record the register uses.  */
      df_uses_record (df, &PATTERN (insn),
                      DF_REF_REG_USE, bb, insn, 0);


      if (GET_CODE (insn) == CALL_INSN)
        {
          rtx note;

          if (df->flags & DF_HARD_REGS)
            {
              /* Kill all registers invalidated by a call.  */
              for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
                if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
                  {
                    rtx reg_clob = df_reg_clobber_gen (i);
                    df_defs_record (df, reg_clob, bb, insn);
                  }
            }

          /* There may be extra registers to be clobbered.  */
          for (note = CALL_INSN_FUNCTION_USAGE (insn);
               note;
               note = XEXP (note, 1))
            if (GET_CODE (XEXP (note, 0)) == CLOBBER)
              df_defs_record (df, XEXP (note, 0), bb, insn);
        }
    }
}

/* Record all the refs within the basic block BB.  */
static void
df_bb_refs_record (df, bb)
     struct df *df;
     basic_block bb;
{
  rtx insn;

  /* Scan the block an insn at a time from beginning to end.  */
  for (insn = bb->head; ; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          /* Record defs and uses within INSN.  */
          df_insn_refs_record (df, bb, insn);
        }
      if (insn == bb->end)
        break;
    }
}


/* Record all the refs in the basic blocks specified by BLOCKS.  */
static void
df_refs_record (df, blocks)
     struct df *df;
     bitmap blocks;
{
  basic_block bb;

  FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
    {
      df_bb_refs_record (df, bb);
    });
}
\f
/* Dataflow analysis routines.  */


/* Create reg-def chains for basic block BB.  These are a list of
   definitions for each register.  */
static void
df_bb_reg_def_chain_create (df, bb)
     struct df *df;
     basic_block bb;
{
  rtx insn;

  /* Perhaps the defs should be sorted using a depth first search
     of the CFG (or possibly a breadth first search).  We currently
     scan the insns of each basic block in reverse order so that the
     first defs appear at the start of the chain.  */

  for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
       insn = PREV_INSN (insn))
    {
      struct df_link *link;
      unsigned int uid = INSN_UID (insn);

      if (! INSN_P (insn))
        continue;

      for (link = df->insns[uid].defs; link; link = link->next)
        {
          struct ref *def = link->ref;
          unsigned int dregno = DF_REF_REGNO (def);

          /* Don't add refs to the chain twice, i.e. only add new
             refs.  XXX the same could be done by testing if the
             current insn is a modified (or a new) one.  This would be
             faster.  */
          if (DF_REF_ID (def) < df->def_id_save)
            continue;

          df->regs[dregno].defs
            = df_link_create (def, df->regs[dregno].defs);
        }
    }
}


/* Create reg-def chains for each basic block within BLOCKS.  These
   are a list of definitions for each register.  */
static void
df_reg_def_chain_create (df, blocks)
     struct df *df;
     bitmap blocks;
{
  basic_block bb;

  FOR_EACH_BB_IN_BITMAP/*_REV*/ (blocks, 0, bb,
    {
      df_bb_reg_def_chain_create (df, bb);
    });
}


/* Create reg-use chains for basic block BB.  These are a list of uses
   for each register.  */
static void
df_bb_reg_use_chain_create (df, bb)
     struct df *df;
     basic_block bb;
{
  rtx insn;

  /* Scan in forward order so that the last uses appear at the
     start of the chain.  */

  for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
       insn = NEXT_INSN (insn))
    {
      struct df_link *link;
      unsigned int uid = INSN_UID (insn);

      if (! INSN_P (insn))
        continue;

      for (link = df->insns[uid].uses; link; link = link->next)
        {
          struct ref *use = link->ref;
          unsigned int uregno = DF_REF_REGNO (use);

          /* Don't add refs to the chain twice, i.e. only add new
             refs.  XXX the same could be done by testing if the
             current insn is a modified (or a new) one.  This would be
             faster.  */
          if (DF_REF_ID (use) < df->use_id_save)
            continue;

          df->regs[uregno].uses
            = df_link_create (use, df->regs[uregno].uses);
        }
    }
}


/* Create reg-use chains for each basic block within BLOCKS.  These
   are a list of uses for each register.  */
static void
df_reg_use_chain_create (df, blocks)
     struct df *df;
     bitmap blocks;
{
  basic_block bb;

  FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
    {
      df_bb_reg_use_chain_create (df, bb);
    });
}


/* Create def-use chains from reaching use bitmaps for basic block BB.  */
static void
df_bb_du_chain_create (df, bb, ru)
     struct df *df;
     basic_block bb;
     bitmap ru;
{
  struct bb_info *bb_info = DF_BB_INFO (df, bb);
  rtx insn;

  bitmap_copy (ru, bb_info->ru_out);

  /* For each def in BB create a linked list (chain) of uses
     reached from the def.  */
  for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
       insn = PREV_INSN (insn))
    {
      struct df_link *def_link;
      struct df_link *use_link;
      unsigned int uid = INSN_UID (insn);

      if (! INSN_P (insn))
        continue;

      /* For each def in insn...  */
      for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
        {
          struct ref *def = def_link->ref;
          unsigned int dregno = DF_REF_REGNO (def);

          DF_REF_CHAIN (def) = 0;

          /* While the reg-use chains are not essential, it
             is _much_ faster to search these short lists rather
             than all the reaching uses, especially for large functions.  */
          for (use_link = df->regs[dregno].uses; use_link;
               use_link = use_link->next)
            {
              struct ref *use = use_link->ref;

              if (bitmap_bit_p (ru, DF_REF_ID (use)))
                {
                  DF_REF_CHAIN (def)
                    = df_link_create (use, DF_REF_CHAIN (def));

                  bitmap_clear_bit (ru, DF_REF_ID (use));
                }
            }
        }

      /* For each use in insn...  */
      for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next)
        {
          struct ref *use = use_link->ref;
          bitmap_set_bit (ru, DF_REF_ID (use));
        }
    }
}


/* Create def-use chains from reaching use bitmaps for basic blocks
   in BLOCKS.  */
static void
df_du_chain_create (df, blocks)
     struct df *df;
     bitmap blocks;
{
  bitmap ru;
  basic_block bb;

  ru = BITMAP_XMALLOC ();

  FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
    {
      df_bb_du_chain_create (df, bb, ru);
    });

  BITMAP_XFREE (ru);
}


/* Create use-def chains from reaching def bitmaps for basic block BB.  */
static void
df_bb_ud_chain_create (df, bb)
     struct df *df;
     basic_block bb;
{
  struct bb_info *bb_info = DF_BB_INFO (df, bb);
  struct ref **reg_def_last = df->reg_def_last;
  rtx insn;

  memset (reg_def_last, 0, df->n_regs * sizeof (struct ref *));

  /* For each use in BB create a linked list (chain) of defs
     that reach the use.  */
  for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
       insn = NEXT_INSN (insn))
    {
      unsigned int uid = INSN_UID (insn);
      struct df_link *use_link;
      struct df_link *def_link;

      if (! INSN_P (insn))
        continue;

      /* For each use in insn...  */
      for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next)
        {
          struct ref *use = use_link->ref;
          unsigned int regno = DF_REF_REGNO (use);

          DF_REF_CHAIN (use) = 0;

          /* Has regno been defined in this BB yet?  If so, use
             the last def as the single entry for the use-def
             chain for this use.  Otherwise, we need to add all
             the defs using this regno that reach the start of
             this BB.  */
          if (reg_def_last[regno])
            {
              DF_REF_CHAIN (use)
                = df_link_create (reg_def_last[regno], 0);
            }
          else
            {
              /* While the reg-def chains are not essential, it is
                 _much_ faster to search these short lists rather than
                 all the reaching defs, especially for large
                 functions.  */
              for (def_link = df->regs[regno].defs; def_link;
                   def_link = def_link->next)
                {
                  struct ref *def = def_link->ref;

                  if (bitmap_bit_p (bb_info->rd_in, DF_REF_ID (def)))
                    {
                      DF_REF_CHAIN (use)
                        = df_link_create (def, DF_REF_CHAIN (use));
                    }
                }
            }
        }


      /* For each def in insn... record the last def of each reg.  */
      for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
        {
          struct ref *def = def_link->ref;
          int dregno = DF_REF_REGNO (def);

          reg_def_last[dregno] = def;
        }
    }
}


/* Create use-def chains from reaching def bitmaps for basic blocks
   within BLOCKS.  */
static void
df_ud_chain_create (df, blocks)
     struct df *df;
     bitmap blocks;
{
  basic_block bb;

  FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
    {
      df_bb_ud_chain_create (df, bb);
    });
}
\f

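/* Transfer functions for the global dataflow problems, passed to
   iterative_dataflow_bitmap below.  Each computes the standard
   iterative equation; e.g. reaching definitions (a forward problem)
   compute OUT = GEN | (IN & ~KILL), while the backward problems
   (reaching uses and live registers) compute IN from OUT in the same
   way.  *CHANGED is set if the recomputed set differs from before.  */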
static void
df_rd_transfer_function (bb, changed, in, out, gen, kill, data)
     int bb ATTRIBUTE_UNUSED;
     int *changed;
     bitmap in, out, gen, kill;
     void *data ATTRIBUTE_UNUSED;
{
  *changed = bitmap_union_of_diff (out, gen, in, kill);
}


static void
df_ru_transfer_function (bb, changed, in, out, gen, kill, data)
     int bb ATTRIBUTE_UNUSED;
     int *changed;
     bitmap in, out, gen, kill;
     void *data ATTRIBUTE_UNUSED;
{
  *changed = bitmap_union_of_diff (in, gen, out, kill);
}


static void
df_lr_transfer_function (bb, changed, in, out, use, def, data)
     int bb ATTRIBUTE_UNUSED;
     int *changed;
     bitmap in, out, use, def;
     void *data ATTRIBUTE_UNUSED;
{
  *changed = bitmap_union_of_diff (in, use, out, def);
}


/* Compute local reaching def info for basic block BB.  */
static void
df_bb_rd_local_compute (df, bb)
     struct df *df;
     basic_block bb;
{
  struct bb_info *bb_info = DF_BB_INFO (df, bb);
  rtx insn;

  for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
       insn = NEXT_INSN (insn))
    {
      unsigned int uid = INSN_UID (insn);
      struct df_link *def_link;

      if (! INSN_P (insn))
        continue;

      for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
        {
          struct ref *def = def_link->ref;
          unsigned int regno = DF_REF_REGNO (def);
          struct df_link *def2_link;

          for (def2_link = df->regs[regno].defs; def2_link;
               def2_link = def2_link->next)
            {
              struct ref *def2 = def2_link->ref;

              /* Add all defs of this reg to the set of kills.  This
                 is greedy since many of these defs will not actually
                 be killed by this BB but it keeps things a lot
                 simpler.  */
              bitmap_set_bit (bb_info->rd_kill, DF_REF_ID (def2));

              /* Zap from the set of gens for this BB.  */
              bitmap_clear_bit (bb_info->rd_gen, DF_REF_ID (def2));
            }

          bitmap_set_bit (bb_info->rd_gen, DF_REF_ID (def));
        }
    }

  bb_info->rd_valid = 1;
}


/* Compute local reaching def info for each basic block within BLOCKS.  */
static void
df_rd_local_compute (df, blocks)
     struct df *df;
     bitmap blocks;
{
  basic_block bb;

  FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
    {
      df_bb_rd_local_compute (df, bb);
    });
}


/* Compute local reaching use (upward exposed use) info for basic
   block BB.  */
static void
df_bb_ru_local_compute (df, bb)
     struct df *df;
     basic_block bb;
{
  /* This is much trickier than computing reaching defs.  With
     reaching defs, defs get killed by other defs.  With upwards
     exposed uses, uses get killed by defs with the same regno.  */

  struct bb_info *bb_info = DF_BB_INFO (df, bb);
  rtx insn;


  for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
       insn = PREV_INSN (insn))
    {
      unsigned int uid = INSN_UID (insn);
      struct df_link *def_link;
      struct df_link *use_link;

      if (! INSN_P (insn))
        continue;

      for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
        {
          struct ref *def = def_link->ref;
          unsigned int dregno = DF_REF_REGNO (def);

          for (use_link = df->regs[dregno].uses; use_link;
               use_link = use_link->next)
            {
              struct ref *use = use_link->ref;

              /* Add all uses of this reg to the set of kills.  This
                 is greedy since many of these uses will not actually
                 be killed by this BB but it keeps things a lot
                 simpler.  */
              bitmap_set_bit (bb_info->ru_kill, DF_REF_ID (use));

              /* Zap from the set of gens for this BB.  */
              bitmap_clear_bit (bb_info->ru_gen, DF_REF_ID (use));
            }
        }

      for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next)
        {
          struct ref *use = use_link->ref;
          /* Add use to set of gens in this BB.  */
          bitmap_set_bit (bb_info->ru_gen, DF_REF_ID (use));
        }
    }
  bb_info->ru_valid = 1;
}


/* Compute local reaching use (upward exposed use) info for each basic
   block within BLOCKS.  */
static void
df_ru_local_compute (df, blocks)
     struct df *df;
     bitmap blocks;
{
  basic_block bb;

  FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
    {
      df_bb_ru_local_compute (df, bb);
    });
}


/* Compute local live variable info for basic block BB.  */
static void
df_bb_lr_local_compute (df, bb)
     struct df *df;
     basic_block bb;
{
  struct bb_info *bb_info = DF_BB_INFO (df, bb);
  rtx insn;

  for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
       insn = PREV_INSN (insn))
    {
      unsigned int uid = INSN_UID (insn);
      struct df_link *link;

      if (! INSN_P (insn))
        continue;

      for (link = df->insns[uid].defs; link; link = link->next)
        {
          struct ref *def = link->ref;
          unsigned int dregno = DF_REF_REGNO (def);

          /* Add def to set of defs in this BB.  */
          bitmap_set_bit (bb_info->lr_def, dregno);

          bitmap_clear_bit (bb_info->lr_use, dregno);
        }

      for (link = df->insns[uid].uses; link; link = link->next)
        {
          struct ref *use = link->ref;
          /* Add use to set of uses in this BB.  */
          bitmap_set_bit (bb_info->lr_use, DF_REF_REGNO (use));
        }
    }
  bb_info->lr_valid = 1;
}


/* Compute local live variable info for each basic block within BLOCKS.  */
static void
df_lr_local_compute (df, blocks)
     struct df *df;
     bitmap blocks;
{
  basic_block bb;

  FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
    {
      df_bb_lr_local_compute (df, bb);
    });
}


/* Compute register info: lifetime, bb, and number of defs and uses
   for basic block BB.  */
static void
df_bb_reg_info_compute (df, bb, live)
     struct df *df;
     basic_block bb;
     bitmap live;
{
  struct reg_info *reg_info = df->regs;
  struct bb_info *bb_info = DF_BB_INFO (df, bb);
  rtx insn;

  bitmap_copy (live, bb_info->lr_out);

  for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
       insn = PREV_INSN (insn))
    {
      unsigned int uid = INSN_UID (insn);
      unsigned int regno;
      struct df_link *link;

      if (! INSN_P (insn))
        continue;

      for (link = df->insns[uid].defs; link; link = link->next)
        {
          struct ref *def = link->ref;
          unsigned int dregno = DF_REF_REGNO (def);

          /* Kill this register.  */
          bitmap_clear_bit (live, dregno);
          reg_info[dregno].n_defs++;
        }

      for (link = df->insns[uid].uses; link; link = link->next)
        {
          struct ref *use = link->ref;
          unsigned int uregno = DF_REF_REGNO (use);

          /* This register is now live.  */
          bitmap_set_bit (live, uregno);
          reg_info[uregno].n_uses++;
        }

      /* Increment lifetimes of all live registers.  */
      EXECUTE_IF_SET_IN_BITMAP (live, 0, regno,
        {
          reg_info[regno].lifetime++;
        });
    }
}


/* Compute register info: lifetime, bb, and number of defs and uses.  */
static void
df_reg_info_compute (df, blocks)
     struct df *df;
     bitmap blocks;
{
  basic_block bb;
  bitmap live;

  live = BITMAP_XMALLOC ();

  FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
    {
      df_bb_reg_info_compute (df, bb, live);
    });

  BITMAP_XFREE (live);
}


/* Assign LUIDs for BB.  */
static int
df_bb_luids_set (df, bb)
     struct df *df;
     basic_block bb;
{
  rtx insn;
  int luid = 0;

  /* The LUIDs are monotonically increasing for each basic block.  */

  for (insn = bb->head; ; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        luid++;
      DF_INSN_LUID (df, insn) = luid;

      if (insn == bb->end)
        break;
    }
  return luid;
}


/* Assign LUIDs for each basic block within BLOCKS.  */
static int
df_luids_set (df, blocks)
     struct df *df;
     bitmap blocks;
{
  basic_block bb;
  int total = 0;

  FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
    {
      total += df_bb_luids_set (df, bb);
    });
  return total;
}

/* Perform dataflow analysis using existing DF structure for blocks
   within BLOCKS.  If BLOCKS is zero, use all basic blocks in the CFG.  */
static void
df_analyse_1 (df, blocks, flags, update)
     struct df *df;
     bitmap blocks;
     int flags;
     int update;
{
  int aflags;
  int dflags;
  int i;
  basic_block bb;

  dflags = 0;
  aflags = flags;
  if (flags & DF_UD_CHAIN)
    aflags |= DF_RD | DF_RD_CHAIN;

  if (flags & DF_DU_CHAIN)
    aflags |= DF_RU;

  if (flags & DF_RU)
    aflags |= DF_RU_CHAIN;

  if (flags & DF_REG_INFO)
    aflags |= DF_LR;

  if (! blocks)
    blocks = df->all_blocks;

  df->flags = flags;
  if (update)
    {
      df_refs_update (df);
      /* More fine grained incremental dataflow analysis would be
         nice.  For now recompute the whole shebang for the
         modified blocks.  */
#if 0
      df_refs_unlink (df, blocks);
#endif
      /* All the def-use, use-def chains can be potentially
         modified by changes in one block.  The size of the
         bitmaps can also change.  */
    }
  else
    {
      /* Scan the function for all register defs and uses.  */
      df_refs_queue (df);
      df_refs_record (df, blocks);

      /* Link all the new defs and uses to the insns.  */
      df_refs_process (df);
    }

2021 /* Allocate the bitmaps now that the total number of defs and uses is
2022 known. If the number of defs or uses has changed, then
2023 these bitmaps need to be reallocated. */
2024 df_bitmaps_alloc (df, aflags);
2025
2026 /* Set the LUIDs for each specified basic block. */
2027 df_luids_set (df, blocks);
2028
2029 /* Recreate reg-def and reg-use chains from scratch so that first
2030 def is at the head of the reg-def chain and the last use is at
2031 the head of the reg-use chain. This is only important for
2032 regs local to a basic block as it speeds up searching. */
2033 if (aflags & DF_RD_CHAIN)
2034 {
2035 df_reg_def_chain_create (df, blocks);
2036 }
2037
2038 if (aflags & DF_RU_CHAIN)
2039 {
2040 df_reg_use_chain_create (df, blocks);
2041 }
2042
2043 df->dfs_order = xmalloc (sizeof(int) * n_basic_blocks);
2044 df->rc_order = xmalloc (sizeof(int) * n_basic_blocks);
2045 df->rts_order = xmalloc (sizeof(int) * n_basic_blocks);
2046 df->inverse_dfs_map = xmalloc (sizeof(int) * last_basic_block);
2047 df->inverse_rc_map = xmalloc (sizeof(int) * last_basic_block);
2048 df->inverse_rts_map = xmalloc (sizeof(int) * last_basic_block);
2049
2050 flow_depth_first_order_compute (df->dfs_order, df->rc_order);
2051 flow_reverse_top_sort_order_compute (df->rts_order);
2052 for (i = 0; i < n_basic_blocks; i ++)
2053 {
2054 df->inverse_dfs_map[df->dfs_order[i]] = i;
2055 df->inverse_rc_map[df->rc_order[i]] = i;
2056 df->inverse_rts_map[df->rts_order[i]] = i;
2057 }
2058 if (aflags & DF_RD)
2059 {
2060 /* Compute the sets of gens and kills for the defs of each bb. */
2061 df_rd_local_compute (df, df->flags & DF_RD ? blocks : df->all_blocks);
2062 {
2063 bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
2064 bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
2065 bitmap *gen = xmalloc (sizeof (bitmap) * last_basic_block);
2066 bitmap *kill = xmalloc (sizeof (bitmap) * last_basic_block);
2067 FOR_EACH_BB (bb)
2068 {
2069 in[bb->index] = DF_BB_INFO (df, bb)->rd_in;
2070 out[bb->index] = DF_BB_INFO (df, bb)->rd_out;
2071 gen[bb->index] = DF_BB_INFO (df, bb)->rd_gen;
2072 kill[bb->index] = DF_BB_INFO (df, bb)->rd_kill;
2073 }
2074 iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
2075 FORWARD, UNION, df_rd_transfer_function,
2076 df->inverse_rc_map, NULL);
2077 free (in);
2078 free (out);
2079 free (gen);
2080 free (kill);
2081 }
2082 }
2083
2084 if (aflags & DF_UD_CHAIN)
2085 {
2086 /* Create use-def chains. */
2087 df_ud_chain_create (df, df->all_blocks);
2088
2089 if (! (flags & DF_RD))
2090 dflags |= DF_RD;
2091 }
2092
2093 if (aflags & DF_RU)
2094 {
2095 /* Compute the sets of gens and kills for the upwards exposed
2096 uses in each bb. */
2097 df_ru_local_compute (df, df->flags & DF_RU ? blocks : df->all_blocks);
2098 {
2099 bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
2100 bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
2101 bitmap *gen = xmalloc (sizeof (bitmap) * last_basic_block);
2102 bitmap *kill = xmalloc (sizeof (bitmap) * last_basic_block);
2103 FOR_EACH_BB (bb)
2104 {
2105 in[bb->index] = DF_BB_INFO (df, bb)->ru_in;
2106 out[bb->index] = DF_BB_INFO (df, bb)->ru_out;
2107 gen[bb->index] = DF_BB_INFO (df, bb)->ru_gen;
2108 kill[bb->index] = DF_BB_INFO (df, bb)->ru_kill;
2109 }
2110 iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
2111 BACKWARD, UNION, df_ru_transfer_function,
2112 df->inverse_rts_map, NULL);
2113 free (in);
2114 free (out);
2115 free (gen);
2116 free (kill);
2117 }
2118 }
2119
2120 if (aflags & DF_DU_CHAIN)
2121 {
2122 /* Create def-use chains. */
2123 df_du_chain_create (df, df->all_blocks);
2124
2125 if (! (flags & DF_RU))
2126 dflags |= DF_RU;
2127 }
2128
2129 /* Free up bitmaps that are no longer required. */
2130 if (dflags)
2131 df_bitmaps_free (df, dflags);
2132
2133 if (aflags & DF_LR)
2134 {
2135 /* Compute the sets of defs and uses of live variables. */
2136 df_lr_local_compute (df, df->flags & DF_LR ? blocks : df->all_blocks);
2137 {
2138 bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
2139 bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
2140 bitmap *use = xmalloc (sizeof (bitmap) * last_basic_block);
2141 bitmap *def = xmalloc (sizeof (bitmap) * last_basic_block);
2142 FOR_EACH_BB (bb)
2143 {
2144 in[bb->index] = DF_BB_INFO (df, bb)->lr_in;
2145 out[bb->index] = DF_BB_INFO (df, bb)->lr_out;
2146 use[bb->index] = DF_BB_INFO (df, bb)->lr_use;
2147 def[bb->index] = DF_BB_INFO (df, bb)->lr_def;
2148 }
2149 iterative_dataflow_bitmap (in, out, use, def, df->all_blocks,
2150 BACKWARD, UNION, df_lr_transfer_function,
2151 df->inverse_rts_map, NULL);
2152 free (in);
2153 free (out);
2154 free (use);
2155 free (def);
2156 }
2157 }
2158
2159 if (aflags & DF_REG_INFO)
2160 {
2161 df_reg_info_compute (df, df->all_blocks);
2162 }
2163 free (df->dfs_order);
2164 free (df->rc_order);
2165 free (df->rts_order);
2166 free (df->inverse_rc_map);
2167 free (df->inverse_dfs_map);
2168 free (df->inverse_rts_map);
2169 }
2170
2171
2172 /* Initialize dataflow analysis. */
2173 struct df *
2174 df_init ()
2175 {
2176 struct df *df;
2177
2178 df = xcalloc (1, sizeof (struct df));
2179
2180 /* Squirrel away a global for debugging. */
2181 ddf = df;
2182
2183 return df;
2184 }
2185
2186
2187 /* Start queuing refs. */
2188 static int
2189 df_refs_queue (df)
2190 struct df *df;
2191 {
2192 df->def_id_save = df->def_id;
2193 df->use_id_save = df->use_id;
2194 /* ???? Perhaps we should save current obstack state so that we can
2195 unwind it. */
2196 return 0;
2197 }
2198
2199
2200 /* Process queued refs. */
2201 static int
2202 df_refs_process (df)
2203 struct df *df;
2204 {
2205 unsigned int i;
2206
2207 /* Build new insn-def chains. */
2208 for (i = df->def_id_save; i != df->def_id; i++)
2209 {
2210 struct ref *def = df->defs[i];
2211 unsigned int uid = DF_REF_INSN_UID (def);
2212
2213 /* Add def to head of def list for INSN. */
2214 df->insns[uid].defs
2215 = df_link_create (def, df->insns[uid].defs);
2216 }
2217
2218 /* Build new insn-use chains. */
2219 for (i = df->use_id_save; i != df->use_id; i++)
2220 {
2221 struct ref *use = df->uses[i];
2222 unsigned int uid = DF_REF_INSN_UID (use);
2223
2224 /* Add use to head of use list for INSN. */
2225 df->insns[uid].uses
2226 = df_link_create (use, df->insns[uid].uses);
2227 }
2228 return 0;
2229 }
2230
2231
2232 /* Update refs for basic block BB. */
2233 static int
2234 df_bb_refs_update (df, bb)
2235 struct df *df;
2236 basic_block bb;
2237 {
2238 rtx insn;
2239 int count = 0;
2240
2241 /* While we have to scan the chain of insns for this BB, we don't
2242 need to allocate and queue a long chain of BB/INSN pairs. Using
2243 a bitmap for insns_modified saves memory and avoids queuing
2244 duplicates. */
2245
2246 for (insn = bb->head; ; insn = NEXT_INSN (insn))
2247 {
2248 unsigned int uid;
2249
2250 uid = INSN_UID (insn);
2251
2252 if (bitmap_bit_p (df->insns_modified, uid))
2253 {
2254 /* Delete any allocated refs of this insn. MPH, FIXME. */
2255 df_insn_refs_unlink (df, bb, insn);
2256
2257 /* Scan the insn for refs. */
2258 df_insn_refs_record (df, bb, insn);
2259
2260 count++;
2261 }
2262 if (insn == bb->end)
2263 break;
2264 }
2265 return count;
2266 }
2267
2268
2269 /* Process all the modified/deleted insns that were queued. */
2270 static int
2271 df_refs_update (df)
2272 struct df *df;
2273 {
2274 basic_block bb;
2275 int count = 0;
2276
2277 if ((unsigned int)max_reg_num () >= df->reg_size)
2278 df_reg_table_realloc (df, 0);
2279
2280 df_refs_queue (df);
2281
2282 FOR_EACH_BB_IN_BITMAP (df->bbs_modified, 0, bb,
2283 {
2284 count += df_bb_refs_update (df, bb);
2285 });
2286
2287 df_refs_process (df);
2288 return count;
2289 }
2290
2291
2292 /* Return non-zero if any of the requested blocks in the bitmap
2293 BLOCKS have been modified. */
2294 static int
2295 df_modified_p (df, blocks)
2296 struct df *df;
2297 bitmap blocks;
2298 {
2299 int update = 0;
2300 basic_block bb;
2301
2302 if (!df->n_bbs)
2303 return 0;
2304
2305 FOR_EACH_BB (bb)
2306 if (bitmap_bit_p (df->bbs_modified, bb->index)
2307 && (! blocks || (blocks == (bitmap) -1) || bitmap_bit_p (blocks, bb->index)))
2308 {
2309 update = 1;
2310 break;
2311 }
2312
2313 return update;
2314 }
2315
2316
2317 /* Analyse dataflow info for the basic blocks specified by the bitmap
2318 BLOCKS, or for the whole CFG if BLOCKS is zero, or just for the
2319 modified blocks if BLOCKS is -1. */
2320 int
2321 df_analyse (df, blocks, flags)
2322 struct df *df;
2323 bitmap blocks;
2324 int flags;
2325 {
2326 int update;
2327
2328 /* We could deal with additional basic blocks being created by
2329 rescanning everything again. */
2330 if (df->n_bbs && df->n_bbs != (unsigned int) last_basic_block)
2331 abort ();
2332
2333 update = df_modified_p (df, blocks);
2334 if (update || (flags != df->flags))
2335 {
2336 if (! blocks)
2337 {
2338 if (df->n_bbs)
2339 {
2340 /* Recompute everything from scratch. */
2341 df_free (df);
2342 }
2343 /* Allocate and initialize data structures. */
2344 df_alloc (df, max_reg_num ());
2345 df_analyse_1 (df, 0, flags, 0);
2346 update = 1;
2347 }
2348 else
2349 {
2350 if (blocks == (bitmap) -1)
2351 blocks = df->bbs_modified;
2352
2353 if (! df->n_bbs)
2354 abort ();
2355
2356 df_analyse_1 (df, blocks, flags, 1);
2357 bitmap_zero (df->bbs_modified);
2358 bitmap_zero (df->insns_modified);
2359 }
2360 }
2361 return update;
2362 }
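
/* A minimal sketch of the incremental path (pass code hypothetical):
   queue each change with df_insn_modify, then rescan only the
   modified blocks by passing -1 for BLOCKS:

   df_insn_modify (df, bb, insn);
   ...
   if (df_analyse (df, (bitmap) -1, flags))
     ... the info was brought up to date for the modified blocks ...

   df_analyse returns non-zero if any of the requested blocks had
   been modified.  */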
2363
2364
2365 /* Free all the dataflow info and the DF structure. */
2366 void
2367 df_finish (df)
2368 struct df *df;
2369 {
2370 df_free (df);
2371 free (df);
2372 }
2373
2374
2375 /* Unlink INSN from its reference information. */
2376 static void
2377 df_insn_refs_unlink (df, bb, insn)
2378 struct df *df;
2379 basic_block bb ATTRIBUTE_UNUSED;
2380 rtx insn;
2381 {
2382 struct df_link *link;
2383 unsigned int uid;
2384
2385 uid = INSN_UID (insn);
2386
2387 /* Unlink all refs defined by this insn. */
2388 for (link = df->insns[uid].defs; link; link = link->next)
2389 df_def_unlink (df, link->ref);
2390
2391 /* Unlink all refs used by this insn. */
2392 for (link = df->insns[uid].uses; link; link = link->next)
2393 df_use_unlink (df, link->ref);
2394
2395 df->insns[uid].defs = 0;
2396 df->insns[uid].uses = 0;
2397 }
2398
2399
2400 #if 0
2401 /* Unlink all the insns within BB from their reference information. */
2402 static void
2403 df_bb_refs_unlink (df, bb)
2404 struct df *df;
2405 basic_block bb;
2406 {
2407 rtx insn;
2408
2409 /* Scan the block an insn at a time from beginning to end. */
2410 for (insn = bb->head; ; insn = NEXT_INSN (insn))
2411 {
2412 if (INSN_P (insn))
2413 {
2414 /* Unlink refs for INSN. */
2415 df_insn_refs_unlink (df, bb, insn);
2416 }
2417 if (insn == bb->end)
2418 break;
2419 }
2420 }
2421
2422
2423 /* Unlink all the refs in the basic blocks specified by BLOCKS.
2424 Not currently used. */
2425 static void
2426 df_refs_unlink (df, blocks)
2427 struct df *df;
2428 bitmap blocks;
2429 {
2430 basic_block bb;
2431
2432 if (blocks)
2433 {
2434 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
2435 {
2436 df_bb_refs_unlink (df, bb);
2437 });
2438 }
2439 else
2440 {
2441 FOR_EACH_BB (bb)
2442 df_bb_refs_unlink (df, bb);
2443 }
2444 }
2445 #endif
2446 \f
2447 /* Functions to modify insns. */
2448
2449
2450 /* Delete INSN and all its reference information. */
2451 rtx
2452 df_insn_delete (df, bb, insn)
2453 struct df *df;
2454 basic_block bb ATTRIBUTE_UNUSED;
2455 rtx insn;
2456 {
2457 /* If the insn is a jump, we should perhaps call delete_insn to
2458 handle the JUMP_LABEL? */
2459
2460 /* We should not be deleting the NOTE_INSN_BASIC_BLOCK or label. */
2461 if (insn == bb->head)
2462 abort ();
2463
2464 /* Delete the insn. */
2465 delete_insn (insn);
2466
2467 df_insn_modify (df, bb, insn);
2468
2469 return NEXT_INSN (insn);
2470 }
2471
2472
2473 /* Mark that INSN within BB may have changed (created/modified/deleted).
2474 This may be called multiple times for the same insn. There is no
2475 harm calling this function if the insn wasn't changed; it will just
2476 slow down the rescanning of refs. */
2477 void
2478 df_insn_modify (df, bb, insn)
2479 struct df *df;
2480 basic_block bb;
2481 rtx insn;
2482 {
2483 unsigned int uid;
2484
2485 uid = INSN_UID (insn);
2486 if (uid >= df->insn_size)
2487 df_insn_table_realloc (df, uid);
2488
2489 bitmap_set_bit (df->bbs_modified, bb->index);
2490 bitmap_set_bit (df->insns_modified, uid);
2491
2492 /* For incremental updating on the fly, perhaps we could make a copy
2493 of all the refs of the original insn and turn them into
2494 anti-refs. When df_refs_update finds these anti-refs, it annihilates
2495 the original refs. If validate_change fails then these anti-refs
2496 will just get ignored. */
2497 }
2498
2499
2500 typedef struct replace_args
2501 {
2502 rtx match;
2503 rtx replacement;
2504 rtx insn;
2505 int modified;
2506 } replace_args;
2507
2508
2509 /* Replace mem pointed to by PX with its associated pseudo register.
2510 DATA is actually a pointer to a structure describing the
2511 instruction currently being scanned and the MEM we are currently
2512 replacing. */
2513 static int
2514 df_rtx_mem_replace (px, data)
2515 rtx *px;
2516 void *data;
2517 {
2518 replace_args *args = (replace_args *) data;
2519 rtx mem = *px;
2520
2521 if (mem == NULL_RTX)
2522 return 0;
2523
2524 switch (GET_CODE (mem))
2525 {
2526 case MEM:
2527 break;
2528
2529 case CONST_DOUBLE:
2530 /* We're not interested in the MEM associated with a
2531 CONST_DOUBLE, so there's no need to traverse into one. */
2532 return -1;
2533
2534 default:
2535 /* This is not a MEM. */
2536 return 0;
2537 }
2538
2539 if (!rtx_equal_p (args->match, mem))
2540 /* This is not the MEM we are currently replacing. */
2541 return 0;
2542
2543 /* Actually replace the MEM. */
2544 validate_change (args->insn, px, args->replacement, 1);
2545 args->modified++;
2546
2547 return 0;
2548 }
2549
2550
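/* Replace all occurrences of MEM within INSN with pseudo REG,
   queuing the changes with validate_change and marking INSN as
   modified if any replacement was made.  Return the number of
   replacements.  */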
2551 int
2552 df_insn_mem_replace (df, bb, insn, mem, reg)
2553 struct df *df;
2554 basic_block bb;
2555 rtx insn;
2556 rtx mem;
2557 rtx reg;
2558 {
2559 replace_args args;
2560
2561 args.insn = insn;
2562 args.match = mem;
2563 args.replacement = reg;
2564 args.modified = 0;
2565
2566 /* Search and replace all matching mems within insn. */
2567 for_each_rtx (&insn, df_rtx_mem_replace, &args);
2568
2569 if (args.modified)
2570 df_insn_modify (df, bb, insn);
2571
2572 /* ???? FIXME. We may have a new def or one or more new uses of REG
2573 in INSN. REG should be a new pseudo so it won't affect the
2574 dataflow information that we currently have. We should add
2575 the new uses and defs to INSN and then recreate the chains
2576 when df_analyse is called. */
2577 return args.modified;
2578 }
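
/* A minimal usage sketch (REG assumed to be a fresh pseudo loaded
   from MEM earlier):

   rtx reg = gen_reg_rtx (GET_MODE (mem));
   ...
   if (df_insn_mem_replace (df, bb, insn, mem, reg))
     apply_change_group ();

   The replacements are only queued (validate_change is called with
   the in-group flag set), so the caller commits them as a group.  */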
2579
2580
2581 /* Replace one register with another. Called through for_each_rtx; PX
2582 points to the rtx being scanned. DATA is actually a pointer to a
2583 structure of arguments. */
2584 static int
2585 df_rtx_reg_replace (px, data)
2586 rtx *px;
2587 void *data;
2588 {
2589 rtx x = *px;
2590 replace_args *args = (replace_args *) data;
2591
2592 if (x == NULL_RTX)
2593 return 0;
2594
2595 if (x == args->match)
2596 {
2597 validate_change (args->insn, px, args->replacement, 1);
2598 args->modified++;
2599 }
2600
2601 return 0;
2602 }
2603
2604
2605 /* Replace the reg within every ref on CHAIN that is within the set
2606 BLOCKS of basic blocks with NEWREG. Also update the regs within
2607 REG_NOTES. */
2608 void
2609 df_refs_reg_replace (df, blocks, chain, oldreg, newreg)
2610 struct df *df;
2611 bitmap blocks;
2612 struct df_link *chain;
2613 rtx oldreg;
2614 rtx newreg;
2615 {
2616 struct df_link *link;
2617 replace_args args;
2618
2619 if (! blocks)
2620 blocks = df->all_blocks;
2621
2622 args.match = oldreg;
2623 args.replacement = newreg;
2624 args.modified = 0;
2625
2626 for (link = chain; link; link = link->next)
2627 {
2628 struct ref *ref = link->ref;
2629 rtx insn = DF_REF_INSN (ref);
2630
2631 if (! INSN_P (insn))
2632 continue;
2633
2634 if (bitmap_bit_p (blocks, DF_REF_BBNO (ref)))
2635 {
2636 df_ref_reg_replace (df, ref, oldreg, newreg);
2637
2638 /* Replace occurrences of the reg within the REG_NOTES. */
2639 if ((! link->next || DF_REF_INSN (ref)
2640 != DF_REF_INSN (link->next->ref))
2641 && REG_NOTES (insn))
2642 {
2643 args.insn = insn;
2644 for_each_rtx (&REG_NOTES (insn), df_rtx_reg_replace, &args);
2645 }
2646 }
2647 else
2648 {
2649 /* Temporary check to ensure that we have a grip on which
2650 regs should be replaced. */
2651 abort ();
2652 }
2653 }
2654 }
2655
2656
2657 /* Replace all occurrences of register OLDREG with register NEWREG in
2658 blocks defined by bitmap BLOCKS. This also replaces occurrences of
2659 OLDREG in the REG_NOTES but only for insns containing OLDREG. This
2660 routine expects the reg-use and reg-def chains to be valid. */
2661 int
2662 df_reg_replace (df, blocks, oldreg, newreg)
2663 struct df *df;
2664 bitmap blocks;
2665 rtx oldreg;
2666 rtx newreg;
2667 {
2668 unsigned int oldregno = REGNO (oldreg);
2669
2670 df_refs_reg_replace (df, blocks, df->regs[oldregno].defs, oldreg, newreg);
2671 df_refs_reg_replace (df, blocks, df->regs[oldregno].uses, oldreg, newreg);
2672 return 1;
2673 }
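
/* A minimal sketch of renaming a register, assuming df_analyse was
   run with DF_RD_CHAIN | DF_RU_CHAIN so that the chains are valid:

   rtx newreg = gen_reg_rtx (GET_MODE (oldreg));
   df_reg_replace (df, blocks, oldreg, newreg);
   apply_change_group ();

   As with df_insn_mem_replace, the changes are queued and must be
   committed by the caller.  */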
2674
2675
2676 /* Try replacing the reg within REF with NEWREG. Do not modify
2677 def-use/use-def chains. */
2678 int
2679 df_ref_reg_replace (df, ref, oldreg, newreg)
2680 struct df *df;
2681 struct ref *ref;
2682 rtx oldreg;
2683 rtx newreg;
2684 {
2685 /* Check that insn was deleted by being converted into a NOTE. If
2686 so ignore this insn. */
2687 if (! INSN_P (DF_REF_INSN (ref)))
2688 return 0;
2689
2690 if (oldreg && oldreg != DF_REF_REG (ref))
2691 abort ();
2692
2693 if (! validate_change (DF_REF_INSN (ref), DF_REF_LOC (ref), newreg, 1))
2694 return 0;
2695
2696 df_insn_modify (df, DF_REF_BB (ref), DF_REF_INSN (ref));
2697 return 1;
2698 }
2699
2700
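/* Remove the last use of REGNO within USE_INSN and move the def of
   REGNO in DEF_INSN so that it is recorded on USE_INSN instead.
   Return the moved def, or zero if the def or use cannot be found.  */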
2701 struct ref*
2702 df_bb_def_use_swap (df, bb, def_insn, use_insn, regno)
2703 struct df * df;
2704 basic_block bb;
2705 rtx def_insn;
2706 rtx use_insn;
2707 unsigned int regno;
2708 {
2709 struct ref *def;
2710 struct ref *use;
2711 int def_uid;
2712 int use_uid;
2713 struct df_link *link;
2714
2715 def = df_bb_insn_regno_first_def_find (df, bb, def_insn, regno);
2716 if (! def)
2717 return 0;
2718
2719 use = df_bb_insn_regno_last_use_find (df, bb, use_insn, regno);
2720 if (! use)
2721 return 0;
2722
2723 /* The USE no longer exists. */
2724 use_uid = INSN_UID (use_insn);
2725 df_use_unlink (df, use);
2726 df_ref_unlink (&df->insns[use_uid].uses, use);
2727
2728 /* The DEF requires shifting so remove it from DEF_INSN
2729 and add it to USE_INSN by reusing LINK. */
2730 def_uid = INSN_UID (def_insn);
2731 link = df_ref_unlink (&df->insns[def_uid].defs, def);
2732 link->ref = def;
2733 link->next = df->insns[use_uid].defs;
2734 df->insns[use_uid].defs = link;
2735
2736 #if 0
2737 link = df_ref_unlink (&df->regs[regno].defs, def);
2738 link->ref = def;
2739 link->next = df->regs[regno].defs;
2740 df->regs[regno].defs = link;
2741 #endif
2742
2743 DF_REF_INSN (def) = use_insn;
2744 return def;
2745 }
2746
2747
2748 /* Mark all insns from FIRST_INSN to LAST_INSN inclusive as modified.
2749 All new insns must be processed by this routine. */
2750 static void
2751 df_insns_modify (df, bb, first_insn, last_insn)
2752 struct df *df;
2753 basic_block bb;
2754 rtx first_insn;
2755 rtx last_insn;
2756 {
2757 rtx insn;
2758
2759 for (insn = first_insn; ; insn = NEXT_INSN (insn))
2760 {
2761 unsigned int uid;
2762
2763 /* A non-const call should not have slipped through the net. If
2764 it does, we need to create a new basic block. Ouch. The
2765 same applies for a label. */
2766 if ((GET_CODE (insn) == CALL_INSN
2767 && ! CONST_OR_PURE_CALL_P (insn))
2768 || GET_CODE (insn) == CODE_LABEL)
2769 abort ();
2770
2771 uid = INSN_UID (insn);
2772
2773 if (uid >= df->insn_size)
2774 df_insn_table_realloc (df, uid);
2775
2776 df_insn_modify (df, bb, insn);
2777
2778 if (insn == last_insn)
2779 break;
2780 }
2781 }
2782
2783
2784 /* Emit PATTERN before INSN within BB. */
2785 rtx
2786 df_pattern_emit_before (df, pattern, bb, insn)
2787 struct df *df ATTRIBUTE_UNUSED;
2788 rtx pattern;
2789 basic_block bb;
2790 rtx insn;
2791 {
2792 rtx ret_insn;
2793 rtx prev_insn = PREV_INSN (insn);
2794
2795 /* We should not be inserting before the start of the block. */
2796 if (insn == bb->head)
2797 abort ();
2798 ret_insn = emit_insn_before (pattern, insn);
2799 if (ret_insn == insn)
2800 return ret_insn;
2801
2802 df_insns_modify (df, bb, NEXT_INSN (prev_insn), ret_insn);
2803 return ret_insn;
2804 }
2805
2806
2807 /* Emit PATTERN after INSN within BB. */
2808 rtx
2809 df_pattern_emit_after (df, pattern, bb, insn)
2810 struct df *df;
2811 rtx pattern;
2812 basic_block bb;
2813 rtx insn;
2814 {
2815 rtx ret_insn;
2816
2817 ret_insn = emit_insn_after (pattern, insn);
2818 if (ret_insn == insn)
2819 return ret_insn;
2820
2821 df_insns_modify (df, bb, NEXT_INSN (insn), ret_insn);
2822 return ret_insn;
2823 }
2824
2825
2826 /* Emit jump PATTERN after INSN within BB. */
2827 rtx
2828 df_jump_pattern_emit_after (df, pattern, bb, insn)
2829 struct df *df;
2830 rtx pattern;
2831 basic_block bb;
2832 rtx insn;
2833 {
2834 rtx ret_insn;
2835
2836 ret_insn = emit_jump_insn_after (pattern, insn);
2837 if (ret_insn == insn)
2838 return ret_insn;
2839
2840 df_insns_modify (df, bb, NEXT_INSN (insn), ret_insn);
2841 return ret_insn;
2842 }
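
/* A minimal sketch (pattern hypothetical) of inserting a new insn
   while keeping the dataflow info consistent:

   rtx pat = gen_rtx_SET (VOIDmode, reg, src);
   rtx new_insn = df_pattern_emit_before (df, pat, bb, insn);

   The emitted insns are queued via df_insns_modify and their refs
   are picked up on the next call to df_analyse.  */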
2843
2844
2845 /* Move INSN within BB before BEFORE_INSN within BEFORE_BB.
2846
2847 This function should only be used to move loop invariant insns
2848 out of a loop where it has been proven that the def-use info
2849 will still be valid. */
2850 rtx
2851 df_insn_move_before (df, bb, insn, before_bb, before_insn)
2852 struct df *df;
2853 basic_block bb;
2854 rtx insn;
2855 basic_block before_bb;
2856 rtx before_insn;
2857 {
2858 struct df_link *link;
2859 unsigned int uid;
2860
2861 if (! bb)
2862 return df_pattern_emit_before (df, insn, before_bb, before_insn);
2863
2864 uid = INSN_UID (insn);
2865
2866 /* Change bb for all df defined and used by this insn. */
2867 for (link = df->insns[uid].defs; link; link = link->next)
2868 DF_REF_BB (link->ref) = before_bb;
2869 for (link = df->insns[uid].uses; link; link = link->next)
2870 DF_REF_BB (link->ref) = before_bb;
2871
2872 /* The lifetimes of the registers used in this insn will be reduced
2873 while the lifetimes of the registers defined in this insn
2874 are likely to be increased. */
2875
2876 /* ???? Perhaps all the insns moved should be stored on a list
2877 which df_analyse removes when it recalculates data flow. */
2878
2879 return emit_insn_before (insn, before_insn);
2880 }
2881 \f
2882 /* Functions to query dataflow information. */
2883
2884
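/* Return non-zero if INSN contains a def of REGNO.  */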
2885 int
2886 df_insn_regno_def_p (df, bb, insn, regno)
2887 struct df *df;
2888 basic_block bb ATTRIBUTE_UNUSED;
2889 rtx insn;
2890 unsigned int regno;
2891 {
2892 unsigned int uid;
2893 struct df_link *link;
2894
2895 uid = INSN_UID (insn);
2896
2897 for (link = df->insns[uid].defs; link; link = link->next)
2898 {
2899 struct ref *def = link->ref;
2900
2901 if (DF_REF_REGNO (def) == regno)
2902 return 1;
2903 }
2904
2905 return 0;
2906 }
2907
2908
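/* Return non-zero if DEF is the only def reaching each of its uses,
   i.e., every use on DEF's def-use chain has DEF as the sole entry
   on its use-def chain.  */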
2909 static int
2910 df_def_dominates_all_uses_p (df, def)
2911 struct df *df ATTRIBUTE_UNUSED;
2912 struct ref *def;
2913 {
2914 struct df_link *du_link;
2915
2916 /* Follow def-use chain to find all the uses of this def. */
2917 for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next)
2918 {
2919 struct ref *use = du_link->ref;
2920 struct df_link *ud_link;
2921
2922 /* Follow use-def chain to check all the defs for this use. */
2923 for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next)
2924 if (ud_link->ref != def)
2925 return 0;
2926 }
2927 return 1;
2928 }
2929
2930
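/* Return non-zero if every def within INSN dominates all of its
   uses.  */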
2931 int
2932 df_insn_dominates_all_uses_p (df, bb, insn)
2933 struct df *df;
2934 basic_block bb ATTRIBUTE_UNUSED;
2935 rtx insn;
2936 {
2937 unsigned int uid;
2938 struct df_link *link;
2939
2940 uid = INSN_UID (insn);
2941
2942 for (link = df->insns[uid].defs; link; link = link->next)
2943 {
2944 struct ref *def = link->ref;
2945
2946 if (! df_def_dominates_all_uses_p (df, def))
2947 return 0;
2948 }
2949
2950 return 1;
2951 }
2952
2953
2954 /* Return non-zero if DEF dominates all of its uses within the bitmap
2955 BLOCKS. */
2956 static int
2957 df_def_dominates_uses_p (df, def, blocks)
2958 struct df *df ATTRIBUTE_UNUSED;
2959 struct ref *def;
2960 bitmap blocks;
2961 {
2962 struct df_link *du_link;
2963
2964 /* Follow def-use chain to find all the uses of this def. */
2965 for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next)
2966 {
2967 struct ref *use = du_link->ref;
2968 struct df_link *ud_link;
2969
2970 /* Only worry about the uses within BLOCKS. For example,
2971 consider a register defined within a loop that is live at the
2972 loop exits. */
2973 if (bitmap_bit_p (blocks, DF_REF_BBNO (use)))
2974 {
2975 /* Follow use-def chain to check all the defs for this use. */
2976 for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next)
2977 if (ud_link->ref != def)
2978 return 0;
2979 }
2980 }
2981 return 1;
2982 }
2983
2984
2985 /* Return non-zero if all the defs of INSN within BB dominate
2986 all the corresponding uses within BLOCKS. */
2987 int
2988 df_insn_dominates_uses_p (df, bb, insn, blocks)
2989 struct df *df;
2990 basic_block bb ATTRIBUTE_UNUSED;
2991 rtx insn;
2992 bitmap blocks;
2993 {
2994 unsigned int uid;
2995 struct df_link *link;
2996
2997 uid = INSN_UID (insn);
2998
2999 for (link = df->insns[uid].defs; link; link = link->next)
3000 {
3001 struct ref *def = link->ref;
3002
3003 /* Only consider the defs within BLOCKS. */
3004 if (bitmap_bit_p (blocks, DF_REF_BBNO (def))
3005 && ! df_def_dominates_uses_p (df, def, blocks))
3006 return 0;
3007 }
3008 return 1;
3009 }
3010
3011
3012 /* Return the basic block that REG is referenced in, or NULL if it is
3013 referenced in multiple basic blocks. */
3014 basic_block
3015 df_regno_bb (df, regno)
3016 struct df *df;
3017 unsigned int regno;
3018 {
3019 struct df_link *defs = df->regs[regno].defs;
3020 struct df_link *uses = df->regs[regno].uses;
3021 struct ref *def = defs ? defs->ref : 0;
3022 struct ref *use = uses ? uses->ref : 0;
3023 basic_block bb_def = def ? DF_REF_BB (def) : 0;
3024 basic_block bb_use = use ? DF_REF_BB (use) : 0;
3025
3026 /* Compare blocks of first def and last use. ???? FIXME. What if
3027 the reg-def and reg-use lists are not correctly ordered? */
3028 return bb_def == bb_use ? bb_def : 0;
3029 }
3030
3031
3032 /* Return non-zero if REG used in multiple basic blocks. */
3033 int
3034 df_reg_global_p (df, reg)
3035 struct df *df;
3036 rtx reg;
3037 {
3038 return df_regno_bb (df, REGNO (reg)) == 0;
3039 }
3040
3041
3042 /* Return total lifetime (in insns) of REG. */
3043 int
3044 df_reg_lifetime (df, reg)
3045 struct df *df;
3046 rtx reg;
3047 {
3048 return df->regs[REGNO (reg)].lifetime;
3049 }
3050
3051
3052 /* Return non-zero if REG live at start of BB. */
3053 int
3054 df_bb_reg_live_start_p (df, bb, reg)
3055 struct df *df ATTRIBUTE_UNUSED;
3056 basic_block bb;
3057 rtx reg;
3058 {
3059 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3060
3061 #ifdef ENABLE_CHECKING
3062 if (! bb_info->lr_in)
3063 abort ();
3064 #endif
3065
3066 return bitmap_bit_p (bb_info->lr_in, REGNO (reg));
3067 }
3068
3069
3070 /* Return non-zero if REG live at end of BB. */
3071 int
3072 df_bb_reg_live_end_p (df, bb, reg)
3073 struct df *df ATTRIBUTE_UNUSED;
3074 basic_block bb;
3075 rtx reg;
3076 {
3077 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3078
3079 #ifdef ENABLE_CHECKING
3080 if (! bb_info->lr_out)
3081 abort ();
3082 #endif
3083
3084 return bitmap_bit_p (bb_info->lr_out, REGNO (reg));
3085 }
3086
3087
3088 /* Return -1 if the life of REG1 ends before the life of REG2 starts,
3089 1 if it starts after the life of REG2 ends, or 0 if the lives overlap. */
3090 int
3091 df_bb_regs_lives_compare (df, bb, reg1, reg2)
3092 struct df *df;
3093 basic_block bb;
3094 rtx reg1;
3095 rtx reg2;
3096 {
3097 unsigned int regno1 = REGNO (reg1);
3098 unsigned int regno2 = REGNO (reg2);
3099 struct ref *def1;
3100 struct ref *use1;
3101 struct ref *def2;
3102 struct ref *use2;
3103
3104
3105 /* The regs must be local to BB. */
3106 if (df_regno_bb (df, regno1) != bb
3107 || df_regno_bb (df, regno2) != bb)
3108 abort ();
3109
3110 def2 = df_bb_regno_first_def_find (df, bb, regno2);
3111 use1 = df_bb_regno_last_use_find (df, bb, regno1);
3112
3113 if (DF_INSN_LUID (df, DF_REF_INSN (def2))
3114 > DF_INSN_LUID (df, DF_REF_INSN (use1)))
3115 return -1;
3116
3117 def1 = df_bb_regno_first_def_find (df, bb, regno1);
3118 use2 = df_bb_regno_last_use_find (df, bb, regno2);
3119
3120 if (DF_INSN_LUID (df, DF_REF_INSN (def1))
3121 > DF_INSN_LUID (df, DF_REF_INSN (use2)))
3122 return 1;
3123
3124 return 0;
3125 }
3126
3127
3128 /* Return last use of REGNO within BB. */
3129 static struct ref *
3130 df_bb_regno_last_use_find (df, bb, regno)
3131 struct df * df;
3132 basic_block bb ATTRIBUTE_UNUSED;
3133 unsigned int regno;
3134 {
3135 struct df_link *link;
3136
3137 /* This assumes that the reg-use list is ordered such that for any
3138 BB, the last use is found first. However, since the BBs are not
3139 ordered, the first use in the chain is not necessarily the last
3140 use in the function. */
3141 for (link = df->regs[regno].uses; link; link = link->next)
3142 {
3143 struct ref *use = link->ref;
3144
3145 if (DF_REF_BB (use) == bb)
3146 return use;
3147 }
3148 return 0;
3149 }
3150
3151
3152 /* Return first def of REGNO within BB. */
3153 static struct ref *
3154 df_bb_regno_first_def_find (df, bb, regno)
3155 struct df * df;
3156 basic_block bb ATTRIBUTE_UNUSED;
3157 unsigned int regno;
3158 {
3159 struct df_link *link;
3160
3161 /* This assumes that the reg-def list is ordered such that for any
3162 BB, the first def is found first. However, since the BBs are not
3163 ordered, the first def in the chain is not necessarily the first
3164 def in the function. */
3165 for (link = df->regs[regno].defs; link; link = link->next)
3166 {
3167 struct ref *def = link->ref;
3168
3169 if (DF_REF_BB (def) == bb)
3170 return def;
3171 }
3172 return 0;
3173 }
3174
3175
3176 /* Return last use of REGNO inside INSN within BB. */
3177 static struct ref *
3178 df_bb_insn_regno_last_use_find (df, bb, insn, regno)
3179 struct df * df;
3180 basic_block bb ATTRIBUTE_UNUSED;
3181 rtx insn;
3182 unsigned int regno;
3183 {
3184 unsigned int uid;
3185 struct df_link *link;
3186
3187 uid = INSN_UID (insn);
3188
3189 for (link = df->insns[uid].uses; link; link = link->next)
3190 {
3191 struct ref *use = link->ref;
3192
3193 if (DF_REF_REGNO (use) == regno)
3194 return use;
3195 }
3196
3197 return 0;
3198 }
3199
3200
3201 /* Return first def of REGNO inside INSN within BB. */
3202 static struct ref *
3203 df_bb_insn_regno_first_def_find (df, bb, insn, regno)
3204 struct df * df;
3205 basic_block bb ATTRIBUTE_UNUSED;
3206 rtx insn;
3207 unsigned int regno;
3208 {
3209 unsigned int uid;
3210 struct df_link *link;
3211
3212 uid = INSN_UID (insn);
3213
3214 for (link = df->insns[uid].defs; link; link = link->next)
3215 {
3216 struct ref *def = link->ref;
3217
3218 if (DF_REF_REGNO (def) == regno)
3219 return def;
3220 }
3221
3222 return 0;
3223 }
3224
3225
3226 /* Return the insn that uses REG if the def of REG within INSN
3227 reaches exactly one use; return NULL_RTX otherwise. */
3228 rtx
3229 df_bb_single_def_use_insn_find (df, bb, insn, reg)
3230 struct df * df;
3231 basic_block bb;
3232 rtx insn;
3233 rtx reg;
3234 {
3235 struct ref *def;
3236 struct ref *use;
3237 struct df_link *du_link;
3238
3239 def = df_bb_insn_regno_first_def_find (df, bb, insn, REGNO (reg));
3240
3241 if (! def)
3242 abort ();
3243
3244 du_link = DF_REF_CHAIN (def);
3245
3246 if (! du_link)
3247 return NULL_RTX;
3248
3249 use = du_link->ref;
3250
3251 /* Check if def is dead. */
3252 if (! use)
3253 return NULL_RTX;
3254
3255 /* Check for multiple uses. */
3256 if (du_link->next)
3257 return NULL_RTX;
3258
3259 return DF_REF_INSN (use);
3260 }
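
/* A minimal sketch: testing whether the value of REG computed by
   INSN is consumed by exactly one insn, e.g. as a candidate for
   combining the pair:

   rtx use_insn = df_bb_single_def_use_insn_find (df, bb, insn, reg);
   if (use_insn)
     ... INSN's def of REG has USE_INSN as its only use ...  */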
3261 \f
3262 /* Functions for debugging/dumping dataflow information. */
3263
3264
3265 /* Dump a def-use or use-def chain for REF to FILE. */
3266 static void
3267 df_chain_dump (link, file)
3268 struct df_link *link;
3269 FILE *file;
3270 {
3271 fprintf (file, "{ ");
3272 for (; link; link = link->next)
3273 {
3274 fprintf (file, "%c%d ",
3275 DF_REF_REG_DEF_P (link->ref) ? 'd' : 'u',
3276 DF_REF_ID (link->ref));
3277 }
3278 fprintf (file, "}");
3279 }
3280
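/* Dump a def-use or use-def chain for REF to FILE, annotating each
   ref with its register number.  */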
3281 static void
3282 df_chain_dump_regno (link, file)
3283 struct df_link *link;
3284 FILE *file;
3285 {
3286 fprintf (file, "{ ");
3287 for (; link; link = link->next)
3288 {
3289 fprintf (file, "%c%d(%d) ",
3290 DF_REF_REG_DEF_P (link->ref) ? 'd' : 'u',
3291 DF_REF_ID (link->ref),
3292 DF_REF_REGNO (link->ref));
3293 }
3294 fprintf (file, "}");
3295 }
3296
3297 /* Dump dataflow info. */
3298 void
3299 df_dump (df, flags, file)
3300 struct df *df;
3301 int flags;
3302 FILE *file;
3303 {
3304 unsigned int j;
3305 basic_block bb;
3306
3307 if (! df || ! file)
3308 return;
3309
3310 fprintf (file, "\nDataflow summary:\n");
3311 fprintf (file, "n_regs = %d, n_defs = %d, n_uses = %d, n_bbs = %d\n",
3312 df->n_regs, df->n_defs, df->n_uses, df->n_bbs);
3313
3314 if (flags & DF_RD)
3315 {
3316 basic_block bb;
3317
3318 fprintf (file, "Reaching defs:\n");
3319 FOR_EACH_BB (bb)
3320 {
3321 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3322
3323 if (! bb_info->rd_in)
3324 continue;
3325
3326 fprintf (file, "bb %d in \t", bb->index);
3327 dump_bitmap (file, bb_info->rd_in);
3328 fprintf (file, "bb %d gen \t", bb->index);
3329 dump_bitmap (file, bb_info->rd_gen);
3330 fprintf (file, "bb %d kill\t", bb->index);
3331 dump_bitmap (file, bb_info->rd_kill);
3332 fprintf (file, "bb %d out \t", bb->index);
3333 dump_bitmap (file, bb_info->rd_out);
3334 }
3335 }
3336
3337 if (flags & DF_UD_CHAIN)
3338 {
3339 fprintf (file, "Use-def chains:\n");
3340 for (j = 0; j < df->n_defs; j++)
3341 {
3342 if (df->defs[j])
3343 {
3344 fprintf (file, "d%d bb %d luid %d insn %d reg %d ",
3345 j, DF_REF_BBNO (df->defs[j]),
3346 DF_INSN_LUID (df, DF_REF_INSN (df->defs[j])),
3347 DF_REF_INSN_UID (df->defs[j]),
3348 DF_REF_REGNO (df->defs[j]));
3349 if (df->defs[j]->flags & DF_REF_READ_WRITE)
3350 fprintf (file, "read/write ");
3351 df_chain_dump (DF_REF_CHAIN (df->defs[j]), file);
3352 fprintf (file, "\n");
3353 }
3354 }
3355 }
3356
3357 if (flags & DF_RU)
3358 {
3359 fprintf (file, "Reaching uses:\n");
3360 FOR_EACH_BB (bb)
3361 {
3362 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3363
3364 if (! bb_info->ru_in)
3365 continue;
3366
3367 fprintf (file, "bb %d in \t", bb->index);
3368 dump_bitmap (file, bb_info->ru_in);
3369 fprintf (file, "bb %d gen \t", bb->index);
3370 dump_bitmap (file, bb_info->ru_gen);
3371 fprintf (file, "bb %d kill\t", bb->index);
3372 dump_bitmap (file, bb_info->ru_kill);
3373 fprintf (file, "bb %d out \t", bb->index);
3374 dump_bitmap (file, bb_info->ru_out);
3375 }
3376 }
3377
3378 if (flags & DF_DU_CHAIN)
3379 {
3380 fprintf (file, "Def-use chains:\n");
3381 for (j = 0; j < df->n_uses; j++)
3382 {
3383 if (df->uses[j])
3384 {
3385 fprintf (file, "u%d bb %d luid %d insn %d reg %d ",
3386 j, DF_REF_BBNO (df->uses[j]),
3387 DF_INSN_LUID (df, DF_REF_INSN (df->uses[j])),
3388 DF_REF_INSN_UID (df->uses[j]),
3389 DF_REF_REGNO (df->uses[j]));
3390 if (df->uses[j]->flags & DF_REF_READ_WRITE)
3391 fprintf (file, "read/write ");
3392 df_chain_dump (DF_REF_CHAIN (df->uses[j]), file);
3393 fprintf (file, "\n");
3394 }
3395 }
3396 }
3397
3398 if (flags & DF_LR)
3399 {
3400 fprintf (file, "Live regs:\n");
3401 FOR_EACH_BB (bb)
3402 {
3403 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3404
3405 if (! bb_info->lr_in)
3406 continue;
3407
3408 fprintf (file, "bb %d in \t", bb->index);
3409 dump_bitmap (file, bb_info->lr_in);
3410 fprintf (file, "bb %d use \t", bb->index);
3411 dump_bitmap (file, bb_info->lr_use);
3412 fprintf (file, "bb %d def \t", bb->index);
3413 dump_bitmap (file, bb_info->lr_def);
3414 fprintf (file, "bb %d out \t", bb->index);
3415 dump_bitmap (file, bb_info->lr_out);
3416 }
3417 }
3418
3419 if (flags & (DF_REG_INFO | DF_RD_CHAIN | DF_RU_CHAIN))
3420 {
3421 struct reg_info *reg_info = df->regs;
3422
3423 fprintf (file, "Register info:\n");
3424 for (j = 0; j < df->n_regs; j++)
3425 {
3426 if (((flags & DF_REG_INFO)
3427 && (reg_info[j].n_uses || reg_info[j].n_defs))
3428 || ((flags & DF_RD_CHAIN) && reg_info[j].defs)
3429 || ((flags & DF_RU_CHAIN) && reg_info[j].uses))
3430 {
3431 fprintf (file, "reg %d", j);
3432 if ((flags & DF_RD_CHAIN) && (flags & DF_RU_CHAIN))
3433 {
3434 basic_block bb = df_regno_bb (df, j);
3435
3436 if (bb)
3437 fprintf (file, " bb %d", bb->index);
3438 else
3439 fprintf (file, " bb ?");
3440 }
3441 if (flags & DF_REG_INFO)
3442 {
3443 fprintf (file, " life %d", reg_info[j].lifetime);
3444 }
3445
3446 if ((flags & DF_REG_INFO) || (flags & DF_RD_CHAIN))
3447 {
3448 fprintf (file, " defs ");
3449 if (flags & DF_REG_INFO)
3450 fprintf (file, "%d ", reg_info[j].n_defs);
3451 if (flags & DF_RD_CHAIN)
3452 df_chain_dump (reg_info[j].defs, file);
3453 }
3454
3455 if ((flags & DF_REG_INFO) || (flags & DF_RU_CHAIN))
3456 {
3457 fprintf (file, " uses ");
3458 if (flags & DF_REG_INFO)
3459 fprintf (file, "%d ", reg_info[j].n_uses);
3460 if (flags & DF_RU_CHAIN)
3461 df_chain_dump (reg_info[j].uses, file);
3462 }
3463
3464 fprintf (file, "\n");
3465 }
3466 }
3467 }
3468 fprintf (file, "\n");
3469 }
3470
3471
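/* Dump the def and use chains of INSN to FILE.  */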
3472 void
3473 df_insn_debug (df, insn, file)
3474 struct df *df;
3475 rtx insn;
3476 FILE *file;
3477 {
3478 unsigned int uid;
3479 int bbi;
3480
3481 uid = INSN_UID (insn);
3482 if (uid >= df->insn_size)
3483 return;
3484
3485 if (df->insns[uid].defs)
3486 bbi = DF_REF_BBNO (df->insns[uid].defs->ref);
3487 else if (df->insns[uid].uses)
3488 bbi = DF_REF_BBNO (df->insns[uid].uses->ref);
3489 else
3490 bbi = -1;
3491
3492 fprintf (file, "insn %d bb %d luid %d defs ",
3493 uid, bbi, DF_INSN_LUID (df, insn));
3494 df_chain_dump (df->insns[uid].defs, file);
3495 fprintf (file, " uses ");
3496 df_chain_dump (df->insns[uid].uses, file);
3497 fprintf (file, "\n");
3498 }
3499
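/* Dump the def and use chains of INSN to FILE, with register
   numbers.  */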
3500 void
3501 df_insn_debug_regno (df, insn, file)
3502 struct df *df;
3503 rtx insn;
3504 FILE *file;
3505 {
3506 unsigned int uid;
3507 int bbi;
3508
3509 uid = INSN_UID (insn);
3510 if (uid >= df->insn_size)
3511 return;
3512
3513 if (df->insns[uid].defs)
3514 bbi = DF_REF_BBNO (df->insns[uid].defs->ref);
3515 else if (df->insns[uid].uses)
3516 bbi = DF_REF_BBNO (df->insns[uid].uses->ref);
3517 else
3518 bbi = -1;
3519
3520 fprintf (file, "insn %d bb %d luid %d defs ",
3521 uid, bbi, DF_INSN_LUID (df, insn));
3522 df_chain_dump_regno (df->insns[uid].defs, file);
3523 fprintf (file, " uses ");
3524 df_chain_dump_regno (df->insns[uid].uses, file);
3525 fprintf (file, "\n");
3526 }
3527
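/* Dump the def and use chains of REGNO to FILE.  */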
3528 static void
3529 df_regno_debug (df, regno, file)
3530 struct df *df;
3531 unsigned int regno;
3532 FILE *file;
3533 {
3534 if (regno >= df->reg_size)
3535 return;
3536
3537 fprintf (file, "reg %d life %d defs ",
3538 regno, df->regs[regno].lifetime);
3539 df_chain_dump (df->regs[regno].defs, file);
3540 fprintf (file, " uses ");
3541 df_chain_dump (df->regs[regno].uses, file);
3542 fprintf (file, "\n");
3543 }
3544
3545
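/* Dump REF and its chain to FILE.  */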
3546 static void
3547 df_ref_debug (df, ref, file)
3548 struct df *df;
3549 struct ref *ref;
3550 FILE *file;
3551 {
3552 fprintf (file, "%c%d ",
3553 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
3554 DF_REF_ID (ref));
3555 fprintf (file, "reg %d bb %d luid %d insn %d chain ",
3556 DF_REF_REGNO (ref),
3557 DF_REF_BBNO (ref),
3558 DF_INSN_LUID (df, DF_REF_INSN (ref)),
3559 INSN_UID (DF_REF_INSN (ref)));
3560 df_chain_dump (DF_REF_CHAIN (ref), file);
3561 fprintf (file, "\n");
3562 }
3563
3564
3565 void
3566 debug_df_insn (insn)
3567 rtx insn;
3568 {
3569 df_insn_debug (ddf, insn, stderr);
3570 debug_rtx (insn);
3571 }
3572
3573
3574 void
3575 debug_df_reg (reg)
3576 rtx reg;
3577 {
3578 df_regno_debug (ddf, REGNO (reg), stderr);
3579 }
3580
3581
3582 void
3583 debug_df_regno (regno)
3584 unsigned int regno;
3585 {
3586 df_regno_debug (ddf, regno, stderr);
3587 }
3588
3589
3590 void
3591 debug_df_ref (ref)
3592 struct ref *ref;
3593 {
3594 df_ref_debug (ddf, ref, stderr);
3595 }
3596
3597
3598 void
3599 debug_df_defno (defno)
3600 unsigned int defno;
3601 {
3602 df_ref_debug (ddf, ddf->defs[defno], stderr);
3603 }
3604
3605
3606 void
3607 debug_df_useno (useno)
3608 unsigned int useno;
3609 {
3610 df_ref_debug (ddf, ddf->uses[useno], stderr);
3611 }
3612
3613
3614 void
3615 debug_df_chain (link)
3616 struct df_link *link;
3617 {
3618 df_chain_dump (link, stderr);
3619 fputc ('\n', stderr);
3620 }
3621
3622 /* Hybrid search algorithm from "Implementation Techniques for
3623 Efficient Data-Flow Analysis of Large Programs". */
3624 static void
3625 hybrid_search_bitmap (block, in, out, gen, kill, dir,
3626 conf_op, transfun, visited, pending,
3627 data)
3628 basic_block block;
3629 bitmap *in, *out, *gen, *kill;
3630 enum df_flow_dir dir;
3631 enum df_confluence_op conf_op;
3632 transfer_function_bitmap transfun;
3633 sbitmap visited;
3634 sbitmap pending;
3635 void *data;
3636 {
3637 int changed;
3638 int i = block->index;
3639 edge e;
3640 basic_block bb = block;
3641 SET_BIT (visited, block->index);
3642 if (TEST_BIT (pending, block->index))
3643 {
3644 if (dir == FORWARD)
3645 {
3646 /* Calculate <conf_op> of predecessor_outs */
3647 bitmap_zero (in[i]);
3648 for (e = bb->pred; e != 0; e = e->pred_next)
3649 {
3650 if (e->src == ENTRY_BLOCK_PTR)
3651 continue;
3652 switch (conf_op)
3653 {
3654 case UNION:
3655 bitmap_a_or_b (in[i], in[i], out[e->src->index]);
3656 break;
3657 case INTERSECTION:
3658 bitmap_a_and_b (in[i], in[i], out[e->src->index]);
3659 break;
3660 }
3661 }
3662 }
3663 else
3664 {
3665 /* Calculate <conf_op> of successor ins */
3666 bitmap_zero (out[i]);
3667 for (e = bb->succ; e != 0; e = e->succ_next)
3668 {
3669 if (e->dest == EXIT_BLOCK_PTR)
3670 continue;
3671 switch (conf_op)
3672 {
3673 case UNION:
3674 bitmap_a_or_b (out[i], out[i], in[e->dest->index]);
3675 break;
3676 case INTERSECTION:
3677 bitmap_a_and_b (out[i], out[i], in[e->dest->index]);
3678 break;
3679 }
3680 }
3681 }
3682 /* Common part */
3683 (*transfun) (i, &changed, in[i], out[i], gen[i], kill[i], data);
3684 RESET_BIT (pending, i);
3685 if (changed)
3686 {
3687 if (dir == FORWARD)
3688 {
3689 for (e = bb->succ; e != 0; e = e->succ_next)
3690 {
3691 if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
3692 continue;
3693 SET_BIT (pending, e->dest->index);
3694 }
3695 }
3696 else
3697 {
3698 for (e = bb->pred; e != 0; e = e->pred_next)
3699 {
3700 if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
3701 continue;
3702 SET_BIT (pending, e->src->index);
3703 }
3704 }
3705 }
3706 }
3707 if (dir == FORWARD)
3708 {
3709 for (e = bb->succ; e != 0; e = e->succ_next)
3710 {
3711 if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
3712 continue;
3713 if (!TEST_BIT (visited, e->dest->index))
3714 hybrid_search_bitmap (e->dest, in, out, gen, kill, dir,
3715 conf_op, transfun, visited, pending,
3716 data);
3717 }
3718 }
3719 else
3720 {
3721 for (e = bb->pred; e != 0; e = e->pred_next)
3722 {
3723 if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
3724 continue;
3725 if (!TEST_BIT (visited, e->src->index))
3726 hybrid_search_bitmap (e->src, in, out, gen, kill, dir,
3727 conf_op, transfun, visited, pending,
3728 data);
3729 }
3730 }
3731 }
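
/* A sketch of a transfer function with the signature expected above
   (this is the pattern df_rd_transfer_function uses); for a forward
   "reaching" style problem it computes out = gen | (in - kill) and
   reports whether OUT changed:

   static void
   example_transfer_function (bb, changed, in, out, gen, kill, data)
   int bb ATTRIBUTE_UNUSED;
   int *changed;
   bitmap in, out, gen, kill;
   void *data ATTRIBUTE_UNUSED;
   {
   *changed = bitmap_union_of_diff (out, gen, in, kill);
   }
*/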
3732
3733
3734 /* Hybrid search for sbitmaps, rather than bitmaps. */
3735 static void
3736 hybrid_search_sbitmap (block, in, out, gen, kill, dir,
3737 conf_op, transfun, visited, pending,
3738 data)
3739 basic_block block;
3740 sbitmap *in, *out, *gen, *kill;
3741 enum df_flow_dir dir;
3742 enum df_confluence_op conf_op;
3743 transfer_function_sbitmap transfun;
3744 sbitmap visited;
3745 sbitmap pending;
3746 void *data;
3747 {
3748 int changed;
3749 int i = block->index;
3750 edge e;
3751 basic_block bb = block;
3752 SET_BIT (visited, block->index);
3753 if (TEST_BIT (pending, block->index))
3754 {
3755 if (dir == FORWARD)
3756 {
3757 /* Calculate <conf_op> of predecessor_outs */
3758 sbitmap_zero (in[i]);
3759 for (e = bb->pred; e != 0; e = e->pred_next)
3760 {
3761 if (e->src == ENTRY_BLOCK_PTR)
3762 continue;
3763 switch (conf_op)
3764 {
3765 case UNION:
3766 sbitmap_a_or_b (in[i], in[i], out[e->src->index]);
3767 break;
3768 case INTERSECTION:
3769 sbitmap_a_and_b (in[i], in[i], out[e->src->index]);
3770 break;
3771 }
3772 }
3773 }
3774 else
3775 {
3776 /* Calculate <conf_op> of successor ins */
3777 sbitmap_zero (out[i]);
3778 for (e = bb->succ; e != 0; e = e->succ_next)
3779 {
3780 if (e->dest == EXIT_BLOCK_PTR)
3781 continue;
3782 switch (conf_op)
3783 {
3784 case UNION:
3785 sbitmap_a_or_b (out[i], out[i], in[e->dest->index]);
3786 break;
3787 case INTERSECTION:
3788 sbitmap_a_and_b (out[i], out[i], in[e->dest->index]);
3789 break;
3790 }
3791 }
3792 }
3793 /* Common part */
3794 (*transfun) (i, &changed, in[i], out[i], gen[i], kill[i], data);
3795 RESET_BIT (pending, i);
3796 if (changed)
3797 {
3798 if (dir == FORWARD)
3799 {
3800 for (e = bb->succ; e != 0; e = e->succ_next)
3801 {
3802 if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
3803 continue;
3804 SET_BIT (pending, e->dest->index);
3805 }
3806 }
3807 else
3808 {
3809 for (e = bb->pred; e != 0; e = e->pred_next)
3810 {
3811 if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
3812 continue;
3813 SET_BIT (pending, e->src->index);
3814 }
3815 }
3816 }
3817 }
3818 if (dir == FORWARD)
3819 {
3820 for (e = bb->succ; e != 0; e = e->succ_next)
3821 {
3822 if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
3823 continue;
3824 if (!TEST_BIT (visited, e->dest->index))
3825 hybrid_search_sbitmap (e->dest, in, out, gen, kill, dir,
3826 conf_op, transfun, visited, pending,
3827 data);
3828 }
3829 }
3830 else
3831 {
3832 for (e = bb->pred; e != 0; e = e->pred_next)
3833 {
3834 if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
3835 continue;
3836 if (!TEST_BIT (visited, e->src->index))
3837 hybrid_search_sbitmap (e->src, in, out, gen, kill, dir,
3838 conf_op, transfun, visited, pending,
3839 data);
3840 }
3841 }
3842 }
3843
3844
3845
3846
3847 /* gen = GEN set.
3848 kill = KILL set.
3849 in, out = Filled in by function.
3850 blocks = Blocks to analyze.
3851 dir = Dataflow direction.
3852 conf_op = Confluence operation.
3853 transfun = Transfer function.
3854 order = Order to iterate in. (Should map block numbers -> order)
3855 data = Whatever you want. It's passed to the transfer function.
3856
3857 This function will perform iterative bitvector dataflow, producing
3858 the in and out sets. Even if you only want to perform it for a
3859 small number of blocks, the vectors for in and out must be large
3860 enough for *all* blocks, because changing one block might affect
3861 others. However, it'll only put what you say to analyze on the
3862 initial worklist.
3863
3864 For forward problems, you probably want to pass in a mapping of
3865 block number to rc_order (like df->inverse_rc_map).
3866 */
3867 void
3868 iterative_dataflow_sbitmap (in, out, gen, kill, blocks,
3869 dir, conf_op, transfun, order, data)
3870 sbitmap *in, *out, *gen, *kill;
3871 bitmap blocks;
3872 enum df_flow_dir dir;
3873 enum df_confluence_op conf_op;
3874 transfer_function_sbitmap transfun;
3875 int *order;
3876 void *data;
3877 {
3878 int i;
3879 fibheap_t worklist;
3880 basic_block bb;
3881 sbitmap visited, pending;
3882 pending = sbitmap_alloc (last_basic_block);
3883 visited = sbitmap_alloc (last_basic_block);
3884 sbitmap_zero (pending);
3885 sbitmap_zero (visited);
3886 worklist = fibheap_new ();
3887 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
3888 {
3889 fibheap_insert (worklist, order[i], (void *) (size_t) i);
3890 SET_BIT (pending, i);
3891 if (dir == FORWARD)
3892 sbitmap_copy (out[i], gen[i]);
3893 else
3894 sbitmap_copy (in[i], gen[i]);
3895 });
3896 while (sbitmap_first_set_bit (pending) != -1)
3897 {
3898 while (!fibheap_empty (worklist))
3899 {
3900 i = (size_t) fibheap_extract_min (worklist);
3901 bb = BASIC_BLOCK (i);
3902 if (!TEST_BIT (visited, bb->index))
3903 hybrid_search_sbitmap (bb, in, out, gen, kill, dir,
3904 conf_op, transfun, visited, pending, data);
3905 }
3906 if (sbitmap_first_set_bit (pending) != -1)
3907 {
3908 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
3909 {
3910 fibheap_insert (worklist, order[i], (void *) (size_t) i);
3911 });
3912 sbitmap_zero (visited);
3913 }
3914 else
3915 {
3916 break;
3917 }
3918 }
3919 sbitmap_free (pending);
3920 sbitmap_free (visited);
3921 fibheap_delete (worklist);
3922 }
3923
3924 /* Exactly the same as iterative_dataflow_sbitmap, except it works on
3925 bitmaps instead. */
3926 void
3927 iterative_dataflow_bitmap (in, out, gen, kill, blocks,
3928 dir, conf_op, transfun, order, data)
3929 bitmap *in, *out, *gen, *kill;
3930 bitmap blocks;
3931 enum df_flow_dir dir;
3932 enum df_confluence_op conf_op;
3933 transfer_function_bitmap transfun;
3934 int *order;
3935 void *data;
3936 {
3937 int i;
3938 fibheap_t worklist;
3939 basic_block bb;
3940 sbitmap visited, pending;
3941 pending = sbitmap_alloc (last_basic_block);
3942 visited = sbitmap_alloc (last_basic_block);
3943 sbitmap_zero (pending);
3944 sbitmap_zero (visited);
3945 worklist = fibheap_new ();
3946 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
3947 {
3948 fibheap_insert (worklist, order[i], (void *) (size_t) i);
3949 SET_BIT (pending, i);
3950 if (dir == FORWARD)
3951 bitmap_copy (out[i], gen[i]);
3952 else
3953 bitmap_copy (in[i], gen[i]);
3954 });
3955 while (sbitmap_first_set_bit (pending) != -1)
3956 {
3957 while (!fibheap_empty (worklist))
3958 {
3959 i = (size_t) fibheap_extract_min (worklist);
3960 bb = BASIC_BLOCK (i);
3961 if (!TEST_BIT (visited, bb->index))
3962 hybrid_search_bitmap (bb, in, out, gen, kill, dir,
3963 conf_op, transfun, visited, pending, data);
3964 }
3965 if (sbitmap_first_set_bit (pending) != -1)
3966 {
3967 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
3968 {
3969 fibheap_insert (worklist, order[i], (void *) (size_t) i);
3970 });
3971 sbitmap_zero (visited);
3972 }
3973 else
3974 {
3975 break;
3976 }
3977 }
3978 sbitmap_free (pending);
3979 sbitmap_free (visited);
3980 fibheap_delete (worklist);
3981 }
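
/* A minimal sketch of driving iterative_dataflow_bitmap directly,
   following the pattern used in df_analyse_1 above: gather the
   per-block bitmaps into arrays indexed by block number, then solve
   a forward union problem in reverse completion order:

   bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
   bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
   bitmap *gen = xmalloc (sizeof (bitmap) * last_basic_block);
   bitmap *kill = xmalloc (sizeof (bitmap) * last_basic_block);
   ... fill the arrays from per-block info ...
   iterative_dataflow_bitmap (in, out, gen, kill, blocks, FORWARD,
   UNION, transfun, df->inverse_rc_map, NULL);

   Here TRANSFUN stands for the caller's transfer function, e.g. the
   example sketched after hybrid_search_bitmap.  */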