]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/gcse.c
ChangeLog.0, [...]: Fix spelling errors.
[thirdparty/gcc.git] / gcc / gcse.c
1 /* Global common subexpression elimination/Partial redundancy elimination
2 and global constant/copy propagation for GNU compiler.
3 Copyright (C) 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 /* TODO
23 - reordering of memory allocation and freeing to be more space efficient
24 - do rough calc of how many regs are needed in each block, and a rough
25 calc of how many regs are available in each class and use that to
26 throttle back the code in cases where RTX_COST is minimal.
27 - a store to the same address as a load does not kill the load if the
28 source of the store is also the destination of the load. Handling this
29 allows more load motion, particularly out of loops.
30 - ability to realloc sbitmap vectors would allow one initial computation
31 of reg_set_in_block with only subsequent additions, rather than
32 recomputing it for each pass
33
34 */
35
36 /* References searched while implementing this.
37
38 Compilers Principles, Techniques and Tools
39 Aho, Sethi, Ullman
40 Addison-Wesley, 1988
41
42 Global Optimization by Suppression of Partial Redundancies
43 E. Morel, C. Renvoise
44 communications of the acm, Vol. 22, Num. 2, Feb. 1979
45
46 A Portable Machine-Independent Global Optimizer - Design and Measurements
47 Frederick Chow
48 Stanford Ph.D. thesis, Dec. 1983
49
50 A Fast Algorithm for Code Movement Optimization
51 D.M. Dhamdhere
52 SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
53
54 A Solution to a Problem with Morel and Renvoise's
55 Global Optimization by Suppression of Partial Redundancies
56 K-H Drechsler, M.P. Stadel
57 ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
58
59 Practical Adaptation of the Global Optimization
60 Algorithm of Morel and Renvoise
61 D.M. Dhamdhere
62 ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991
63
64 Efficiently Computing Static Single Assignment Form and the Control
65 Dependence Graph
66 R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
67 ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
68
69 Lazy Code Motion
70 J. Knoop, O. Ruthing, B. Steffen
71 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
72
73 What's In a Region? Or Computing Control Dependence Regions in Near-Linear
74 Time for Reducible Flow Control
75 Thomas Ball
76 ACM Letters on Programming Languages and Systems,
77 Vol. 2, Num. 1-4, Mar-Dec 1993
78
79 An Efficient Representation for Sparse Sets
80 Preston Briggs, Linda Torczon
81 ACM Letters on Programming Languages and Systems,
82 Vol. 2, Num. 1-4, Mar-Dec 1993
83
84 A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
85 K-H Drechsler, M.P. Stadel
86 ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
87
88 Partial Dead Code Elimination
89 J. Knoop, O. Ruthing, B. Steffen
90 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
91
92 Effective Partial Redundancy Elimination
93 P. Briggs, K.D. Cooper
94 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
95
96 The Program Structure Tree: Computing Control Regions in Linear Time
97 R. Johnson, D. Pearson, K. Pingali
98 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
99
100 Optimal Code Motion: Theory and Practice
101 J. Knoop, O. Ruthing, B. Steffen
102 ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
103
104 The power of assignment motion
105 J. Knoop, O. Ruthing, B. Steffen
106 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
107
108 Global code motion / global value numbering
109 C. Click
110 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
111
112 Value Driven Redundancy Elimination
113 L.T. Simpson
114 Rice University Ph.D. thesis, Apr. 1996
115
116 Value Numbering
117 L.T. Simpson
118 Massively Scalar Compiler Project, Rice University, Sep. 1996
119
120 High Performance Compilers for Parallel Computing
121 Michael Wolfe
122 Addison-Wesley, 1996
123
124 Advanced Compiler Design and Implementation
125 Steven Muchnick
126 Morgan Kaufmann, 1997
127
128 Building an Optimizing Compiler
129 Robert Morgan
130 Digital Press, 1998
131
132 People wishing to speed up the code here should read:
133 Elimination Algorithms for Data Flow Analysis
134 B.G. Ryder, M.C. Paull
135 ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
136
137 How to Analyze Large Programs Efficiently and Informatively
138 D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
139 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
140
141 People wishing to do something different can find various possibilities
142 in the above papers and elsewhere.
143 */
144
145 #include "config.h"
146 #include "system.h"
147 #include "toplev.h"
148
149 #include "rtl.h"
150 #include "tm_p.h"
151 #include "regs.h"
152 #include "hard-reg-set.h"
153 #include "flags.h"
154 #include "real.h"
155 #include "insn-config.h"
156 #include "recog.h"
157 #include "basic-block.h"
158 #include "output.h"
159 #include "function.h"
160 #include "expr.h"
161 #include "ggc.h"
162 #include "params.h"
163
164 #include "obstack.h"
165 #define obstack_chunk_alloc gmalloc
166 #define obstack_chunk_free free
167
168 /* Propagate flow information through back edges and thus enable PRE's
169 moving loop invariant calculations out of loops.
170
171 Originally this tended to create worse overall code, but several
172 improvements during the development of PRE seem to have made following
173 back edges generally a win.
174
175 Note much of the loop invariant code motion done here would normally
176 be done by loop.c, which has more heuristics for when to move invariants
177 out of loops. At some point we might need to move some of those
178 heuristics into gcse.c. */
179 #define FOLLOW_BACK_EDGES 1
180
181 /* We support GCSE via Partial Redundancy Elimination. PRE optimizations
182 are a superset of those done by GCSE.
183
184 We perform the following steps:
185
186 1) Compute basic block information.
187
188 2) Compute table of places where registers are set.
189
190 3) Perform copy/constant propagation.
191
192 4) Perform global cse.
193
194 5) Perform another pass of copy/constant propagation.
195
196 Two passes of copy/constant propagation are done because the first one
197 enables more GCSE and the second one helps to clean up the copies that
198 GCSE creates. This is needed more for PRE than for Classic because Classic
199 GCSE will try to use an existing register containing the common
200 subexpression rather than create a new one. This is harder to do for PRE
201 because of the code motion (which Classic GCSE doesn't do).
202
203 Expressions we are interested in GCSE-ing are of the form
204 (set (pseudo-reg) (expression)).
205 Function want_to_gcse_p says what these are.
206
207 PRE handles moving invariant expressions out of loops (by treating them as
208 partially redundant).
209
210 Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
211 assignment) based GVN (global value numbering). L. T. Simpson's paper
212 (Rice University) on value numbering is a useful reference for this.
213
214 **********************
215
216 We used to support multiple passes but there are diminishing returns in
217 doing so. The first pass usually makes 90% of the changes that are doable.
218 A second pass can make a few more changes made possible by the first pass.
219 Experiments show any further passes don't make enough changes to justify
220 the expense.
221
222 A study of spec92 using an unlimited number of passes:
223 [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
224 [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
225 [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1
226
227 It was found doing copy propagation between each pass enables further
228 substitutions.
229
230 PRE is quite expensive in complicated functions because the DFA can take
231 awhile to converge. Hence we only perform one pass. The parameter max-gcse-passes can
232 be modified if one wants to experiment.
233
234 **********************
235
236 The steps for PRE are:
237
238 1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
239
240 2) Perform the data flow analysis for PRE.
241
242 3) Delete the redundant instructions
243
244 4) Insert the required copies [if any] that make the partially
245 redundant instructions fully redundant.
246
247 5) For other reaching expressions, insert an instruction to copy the value
248 to a newly created pseudo that will reach the redundant instruction.
249
250 The deletion is done first so that when we do insertions we
251 know which pseudo reg to use.
252
253 Various papers have argued that PRE DFA is expensive (O(n^2)) and others
254 argue it is not. The number of iterations for the algorithm to converge
255 is typically 2-4 so I don't view it as that expensive (relatively speaking).
256
257 PRE GCSE depends heavily on the second CSE pass to clean up the copies
258 we create. To make an expression reach the place where it's redundant,
259 the result of the expression is copied to a new register, and the redundant
260 expression is deleted by replacing it with this new register. Classic GCSE
261 doesn't have this problem as much as it computes the reaching defs of
262 each register in each block and thus can try to use an existing register.
263
264 **********************
265
266 A fair bit of simplicity is created by creating small functions for simple
267 tasks, even when the function is only called in one place. This may
268 measurably slow things down [or may not] by creating more function call
269 overhead than is necessary. The source is laid out so that it's trivial
270 to make the affected functions inline so that one can measure what speed
271 up, if any, can be achieved, and maybe later when things settle things can
272 be rearranged.
273
274 Help stamp out big monolithic functions! */
275 \f
276 /* GCSE global vars. */
277
278 /* -dG dump file. */
279 static FILE *gcse_file;
280
281 /* Note whether or not we should run jump optimization after gcse. We
282 want to do this for two cases.
283
284 * If we changed any jumps via cprop.
285
286 * If we added any labels via edge splitting. */
287
288 static int run_jump_opt_after_gcse;
289
290 /* Bitmaps are normally not included in debugging dumps.
291 However it's useful to be able to print them from GDB.
292 We could create special functions for this, but it's simpler to
293 just allow passing stderr to the dump_foo fns. Since stderr can
294 be a macro, we store a copy here. */
295 static FILE *debug_stderr;
296
297 /* An obstack for our working variables. */
298 static struct obstack gcse_obstack;
299
300 /* Non-zero for each mode that supports (set (reg) (reg)).
301 This is trivially true for integer and floating point values.
302 It may or may not be true for condition codes. */
303 static char can_copy_p[(int) NUM_MACHINE_MODES];
304
305 /* Non-zero if can_copy_p has been initialized. */
306 static int can_copy_init_p;
307
308 struct reg_use {rtx reg_rtx; };
309
310 /* Hash table of expressions. */
311
312 struct expr
313 {
314 /* The expression (SET_SRC for expressions, PATTERN for assignments). */
315 rtx expr;
316 /* Index in the available expression bitmaps. */
317 int bitmap_index;
318 /* Next entry with the same hash. */
319 struct expr *next_same_hash;
320 /* List of anticipatable occurrences in basic blocks in the function.
321 An "anticipatable occurrence" is one that is the first occurrence in the
322 basic block, the operands are not modified in the basic block prior
323 to the occurrence and the output is not used between the start of
324 the block and the occurrence. */
325 struct occr *antic_occr;
326 /* List of available occurrence in basic blocks in the function.
327 An "available occurrence" is one that is the last occurrence in the
328 basic block and the operands are not modified by following statements in
329 the basic block [including this insn]. */
330 struct occr *avail_occr;
331 /* Non-null if the computation is PRE redundant.
332 The value is the newly created pseudo-reg to record a copy of the
333 expression in all the places that reach the redundant copy. */
334 rtx reaching_reg;
335 };
336
337 /* Occurrence of an expression.
338 There is one per basic block. If a pattern appears more than once the
339 last appearance is used [or first for anticipatable expressions]. */
340
341 struct occr
342 {
343 /* Next occurrence of this expression. */
344 struct occr *next;
345 /* The insn that computes the expression. */
346 rtx insn;
347 /* Non-zero if this [anticipatable] occurrence has been deleted. */
348 char deleted_p;
349 /* Non-zero if this [available] occurrence has been copied to
350 reaching_reg. */
351 /* ??? This is mutually exclusive with deleted_p, so they could share
352 the same byte. */
353 char copied_p;
354 };
355
356 /* Expression and copy propagation hash tables.
357 Each hash table is an array of buckets.
358 ??? It is known that if it were an array of entries, structure elements
359 `next_same_hash' and `bitmap_index' wouldn't be necessary. However, it is
360 not clear whether in the final analysis a sufficient amount of memory would
361 be saved as the size of the available expression bitmaps would be larger
362 [one could build a mapping table without holes afterwards though].
363 Someday I'll perform the computation and figure it out. */
364
365 /* Total size of the expression hash table, in elements. */
366 static unsigned int expr_hash_table_size;
367
368 /* The table itself.
369 This is an array of `expr_hash_table_size' elements. */
370 static struct expr **expr_hash_table;
371
372 /* Total size of the copy propagation hash table, in elements. */
373 static unsigned int set_hash_table_size;
374
375 /* The table itself.
376 This is an array of `set_hash_table_size' elements. */
377 static struct expr **set_hash_table;
378
379 /* Mapping of uids to cuids.
380 Only real insns get cuids. */
381 static int *uid_cuid;
382
383 /* Highest UID in UID_CUID. */
384 static int max_uid;
385
386 /* Get the cuid of an insn. */
387 #ifdef ENABLE_CHECKING
388 #define INSN_CUID(INSN) (INSN_UID (INSN) > max_uid ? (abort (), 0) : uid_cuid[INSN_UID (INSN)])
389 #else
390 #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
391 #endif
392
393 /* Number of cuids. */
394 static int max_cuid;
395
396 /* Mapping of cuids to insns. */
397 static rtx *cuid_insn;
398
399 /* Get insn from cuid. */
400 #define CUID_INSN(CUID) (cuid_insn[CUID])
401
402 /* Maximum register number in function prior to doing gcse + 1.
403 Registers created during this pass have regno >= max_gcse_regno.
404 This is named with "gcse" to not collide with global of same name. */
405 static unsigned int max_gcse_regno;
406
407 /* Maximum number of cse-able expressions found. */
408 static int n_exprs;
409
410 /* Maximum number of assignments for copy propagation found. */
411 static int n_sets;
412
413 /* Table of registers that are modified.
414
415 For each register, each element is a list of places where the pseudo-reg
416 is set.
417
418 For simplicity, GCSE is done on sets of pseudo-regs only. PRE GCSE only
419 requires knowledge of which blocks kill which regs [and thus could use
420 a bitmap instead of the lists `reg_set_table' uses].
421
422 `reg_set_table' and could be turned into an array of bitmaps (num-bbs x
423 num-regs) [however perhaps it may be useful to keep the data as is]. One
424 advantage of recording things this way is that `reg_set_table' is fairly
425 sparse with respect to pseudo regs but for hard regs could be fairly dense
426 [relatively speaking]. And recording sets of pseudo-regs in lists speeds
427 up functions like compute_transp since in the case of pseudo-regs we only
428 need to iterate over the number of times a pseudo-reg is set, not over the
429 number of basic blocks [clearly there is a bit of a slow down in the cases
430 where a pseudo is set more than once in a block, however it is believed
431 that the net effect is to speed things up]. This isn't done for hard-regs
432 because recording call-clobbered hard-regs in `reg_set_table' at each
433 function call can consume a fair bit of memory, and iterating over
434 hard-regs stored this way in compute_transp will be more expensive. */
435
436 typedef struct reg_set
437 {
438 /* The next setting of this register. */
439 struct reg_set *next;
440 /* The insn where it was set. */
441 rtx insn;
442 } reg_set;
443
444 static reg_set **reg_set_table;
445
446 /* Size of `reg_set_table'.
447 The table starts out at max_gcse_regno + slop, and is enlarged as
448 necessary. */
449 static int reg_set_table_size;
450
451 /* Amount to grow `reg_set_table' by when it's full. */
452 #define REG_SET_TABLE_SLOP 100
453
454 /* This is a list of expressions which are MEMs and will be used by load
455 or store motion.
456 Load motion tracks MEMs which aren't killed by
457 anything except itself. (ie, loads and stores to a single location).
458 We can then allow movement of these MEM refs with a little special
459 allowance. (all stores copy the same value to the reaching reg used
460 for the loads). This means all values used to store into memory must have
461 no side effects so we can re-issue the setter value.
462 Store Motion uses this structure as an expression table to track stores
463 which look interesting, and might be moveable towards the exit block. */
464
465 struct ls_expr
466 {
467 struct expr * expr; /* Gcse expression reference for LM. */
468 rtx pattern; /* Pattern of this mem. */
469 rtx loads; /* INSN list of loads seen. */
470 rtx stores; /* INSN list of stores seen. */
471 struct ls_expr * next; /* Next in the list. */
472 int invalid; /* Invalid for some reason. */
473 int index; /* If it maps to a bitmap index. */
474 int hash_index; /* Index when in a hash table. */
475 rtx reaching_reg; /* Register to use when re-writing. */
476 };
477
478 /* Head of the list of load/store memory refs. */
479 static struct ls_expr * pre_ldst_mems = NULL;
480
481 /* Bitmap containing one bit for each register in the program.
482 Used when performing GCSE to track which registers have been set since
483 the start of the basic block. */
484 static regset reg_set_bitmap;
485
486 /* For each block, a bitmap of registers set in the block.
487 This is used by expr_killed_p and compute_transp.
488 It is computed during hash table computation and not by compute_sets
489 as it includes registers added since the last pass (or between cprop and
490 gcse) and it's currently not easy to realloc sbitmap vectors. */
491 static sbitmap *reg_set_in_block;
492
493 /* Array, indexed by basic block number for a list of insns which modify
494 memory within that block. */
495 static rtx * modify_mem_list;
496 bitmap modify_mem_list_set;
497
498 /* This array parallels modify_mem_list, but is kept canonicalized. */
499 static rtx * canon_modify_mem_list;
500 bitmap canon_modify_mem_list_set;
501 /* Various variables for statistics gathering. */
502
503 /* Memory used in a pass.
504 This isn't intended to be absolutely precise. Its intent is only
505 to keep an eye on memory usage. */
506 static int bytes_used;
507
508 /* GCSE substitutions made. */
509 static int gcse_subst_count;
510 /* Number of copy instructions created. */
511 static int gcse_create_count;
512 /* Number of constants propagated. */
513 static int const_prop_count;
514 /* Number of copys propagated. */
515 static int copy_prop_count;
516 \f
517 /* These variables are used by classic GCSE.
518 Normally they'd be defined a bit later, but `rd_gen' needs to
519 be declared sooner. */
520
521 /* Each block has a bitmap of each type.
522 The length of each blocks bitmap is:
523
524 max_cuid - for reaching definitions
525 n_exprs - for available expressions
526
527 Thus we view the bitmaps as 2 dimensional arrays. i.e.
528 rd_kill[block_num][cuid_num]
529 ae_kill[block_num][expr_num] */
530
531 /* For reaching defs */
532 static sbitmap *rd_kill, *rd_gen, *reaching_defs, *rd_out;
533
534 /* for available exprs */
535 static sbitmap *ae_kill, *ae_gen, *ae_in, *ae_out;
536
537 /* Objects of this type are passed around by the null-pointer check
538 removal routines. */
539 struct null_pointer_info
540 {
541 /* The basic block being processed. */
542 int current_block;
543 /* The first register to be handled in this pass. */
544 unsigned int min_reg;
545 /* One greater than the last register to be handled in this pass. */
546 unsigned int max_reg;
547 sbitmap *nonnull_local;
548 sbitmap *nonnull_killed;
549 };
550 \f
551 static void compute_can_copy PARAMS ((void));
552 static char *gmalloc PARAMS ((unsigned int));
553 static char *grealloc PARAMS ((char *, unsigned int));
554 static char *gcse_alloc PARAMS ((unsigned long));
555 static void alloc_gcse_mem PARAMS ((rtx));
556 static void free_gcse_mem PARAMS ((void));
557 static void alloc_reg_set_mem PARAMS ((int));
558 static void free_reg_set_mem PARAMS ((void));
559 static int get_bitmap_width PARAMS ((int, int, int));
560 static void record_one_set PARAMS ((int, rtx));
561 static void record_set_info PARAMS ((rtx, rtx, void *));
562 static void compute_sets PARAMS ((rtx));
563 static void hash_scan_insn PARAMS ((rtx, int, int));
564 static void hash_scan_set PARAMS ((rtx, rtx, int));
565 static void hash_scan_clobber PARAMS ((rtx, rtx));
566 static void hash_scan_call PARAMS ((rtx, rtx));
567 static int want_to_gcse_p PARAMS ((rtx));
568 static int oprs_unchanged_p PARAMS ((rtx, rtx, int));
569 static int oprs_anticipatable_p PARAMS ((rtx, rtx));
570 static int oprs_available_p PARAMS ((rtx, rtx));
571 static void insert_expr_in_table PARAMS ((rtx, enum machine_mode, rtx,
572 int, int));
573 static void insert_set_in_table PARAMS ((rtx, rtx));
574 static unsigned int hash_expr PARAMS ((rtx, enum machine_mode, int *, int));
575 static unsigned int hash_expr_1 PARAMS ((rtx, enum machine_mode, int *));
576 static unsigned int hash_string_1 PARAMS ((const char *));
577 static unsigned int hash_set PARAMS ((int, int));
578 static int expr_equiv_p PARAMS ((rtx, rtx));
579 static void record_last_reg_set_info PARAMS ((rtx, int));
580 static void record_last_mem_set_info PARAMS ((rtx));
581 static void record_last_set_info PARAMS ((rtx, rtx, void *));
582 static void compute_hash_table PARAMS ((int));
583 static void alloc_set_hash_table PARAMS ((int));
584 static void free_set_hash_table PARAMS ((void));
585 static void compute_set_hash_table PARAMS ((void));
586 static void alloc_expr_hash_table PARAMS ((unsigned int));
587 static void free_expr_hash_table PARAMS ((void));
588 static void compute_expr_hash_table PARAMS ((void));
589 static void dump_hash_table PARAMS ((FILE *, const char *, struct expr **,
590 int, int));
591 static struct expr *lookup_expr PARAMS ((rtx));
592 static struct expr *lookup_set PARAMS ((unsigned int, rtx));
593 static struct expr *next_set PARAMS ((unsigned int, struct expr *));
594 static void reset_opr_set_tables PARAMS ((void));
595 static int oprs_not_set_p PARAMS ((rtx, rtx));
596 static void mark_call PARAMS ((rtx));
597 static void mark_set PARAMS ((rtx, rtx));
598 static void mark_clobber PARAMS ((rtx, rtx));
599 static void mark_oprs_set PARAMS ((rtx));
600 static void alloc_cprop_mem PARAMS ((int, int));
601 static void free_cprop_mem PARAMS ((void));
602 static void compute_transp PARAMS ((rtx, int, sbitmap *, int));
603 static void compute_transpout PARAMS ((void));
604 static void compute_local_properties PARAMS ((sbitmap *, sbitmap *, sbitmap *,
605 int));
606 static void compute_cprop_data PARAMS ((void));
607 static void find_used_regs PARAMS ((rtx *, void *));
608 static int try_replace_reg PARAMS ((rtx, rtx, rtx));
609 static struct expr *find_avail_set PARAMS ((int, rtx));
610 static int cprop_jump PARAMS ((basic_block, rtx, rtx, rtx));
611 #ifdef HAVE_cc0
612 static int cprop_cc0_jump PARAMS ((basic_block, rtx, struct reg_use *, rtx));
613 #endif
614 static void mems_conflict_for_gcse_p PARAMS ((rtx, rtx, void *));
615 static int load_killed_in_block_p PARAMS ((basic_block, int, rtx, int));
616 static void canon_list_insert PARAMS ((rtx, rtx, void *));
617 static int cprop_insn PARAMS ((basic_block, rtx, int));
618 static int cprop PARAMS ((int));
619 static int one_cprop_pass PARAMS ((int, int));
620 static void alloc_pre_mem PARAMS ((int, int));
621 static void free_pre_mem PARAMS ((void));
622 static void compute_pre_data PARAMS ((void));
623 static int pre_expr_reaches_here_p PARAMS ((basic_block, struct expr *,
624 basic_block));
625 static void insert_insn_end_bb PARAMS ((struct expr *, basic_block, int));
626 static void pre_insert_copy_insn PARAMS ((struct expr *, rtx));
627 static void pre_insert_copies PARAMS ((void));
628 static int pre_delete PARAMS ((void));
629 static int pre_gcse PARAMS ((void));
630 static int one_pre_gcse_pass PARAMS ((int));
631 static void add_label_notes PARAMS ((rtx, rtx));
632 static void alloc_code_hoist_mem PARAMS ((int, int));
633 static void free_code_hoist_mem PARAMS ((void));
634 static void compute_code_hoist_vbeinout PARAMS ((void));
635 static void compute_code_hoist_data PARAMS ((void));
636 static int hoist_expr_reaches_here_p PARAMS ((basic_block, int, basic_block,
637 char *));
638 static void hoist_code PARAMS ((void));
639 static int one_code_hoisting_pass PARAMS ((void));
640 static void alloc_rd_mem PARAMS ((int, int));
641 static void free_rd_mem PARAMS ((void));
642 static void handle_rd_kill_set PARAMS ((rtx, int, basic_block));
643 static void compute_kill_rd PARAMS ((void));
644 static void compute_rd PARAMS ((void));
645 static void alloc_avail_expr_mem PARAMS ((int, int));
646 static void free_avail_expr_mem PARAMS ((void));
647 static void compute_ae_gen PARAMS ((void));
648 static int expr_killed_p PARAMS ((rtx, basic_block));
649 static void compute_ae_kill PARAMS ((sbitmap *, sbitmap *));
650 static int expr_reaches_here_p PARAMS ((struct occr *, struct expr *,
651 basic_block, int));
652 static rtx computing_insn PARAMS ((struct expr *, rtx));
653 static int def_reaches_here_p PARAMS ((rtx, rtx));
654 static int can_disregard_other_sets PARAMS ((struct reg_set **, rtx, int));
655 static int handle_avail_expr PARAMS ((rtx, struct expr *));
656 static int classic_gcse PARAMS ((void));
657 static int one_classic_gcse_pass PARAMS ((int));
658 static void invalidate_nonnull_info PARAMS ((rtx, rtx, void *));
659 static void delete_null_pointer_checks_1 PARAMS ((varray_type *, unsigned int *,
660 sbitmap *, sbitmap *,
661 struct null_pointer_info *));
662 static rtx process_insert_insn PARAMS ((struct expr *));
663 static int pre_edge_insert PARAMS ((struct edge_list *, struct expr **));
664 static int expr_reaches_here_p_work PARAMS ((struct occr *, struct expr *,
665 basic_block, int, char *));
666 static int pre_expr_reaches_here_p_work PARAMS ((basic_block, struct expr *,
667 basic_block, char *));
668 static struct ls_expr * ldst_entry PARAMS ((rtx));
669 static void free_ldst_entry PARAMS ((struct ls_expr *));
670 static void free_ldst_mems PARAMS ((void));
671 static void print_ldst_list PARAMS ((FILE *));
672 static struct ls_expr * find_rtx_in_ldst PARAMS ((rtx));
673 static int enumerate_ldsts PARAMS ((void));
674 static inline struct ls_expr * first_ls_expr PARAMS ((void));
675 static inline struct ls_expr * next_ls_expr PARAMS ((struct ls_expr *));
676 static int simple_mem PARAMS ((rtx));
677 static void invalidate_any_buried_refs PARAMS ((rtx));
678 static void compute_ld_motion_mems PARAMS ((void));
679 static void trim_ld_motion_mems PARAMS ((void));
680 static void update_ld_motion_stores PARAMS ((struct expr *));
681 static void reg_set_info PARAMS ((rtx, rtx, void *));
682 static int store_ops_ok PARAMS ((rtx, basic_block));
683 static void find_moveable_store PARAMS ((rtx));
684 static int compute_store_table PARAMS ((void));
685 static int load_kills_store PARAMS ((rtx, rtx));
686 static int find_loads PARAMS ((rtx, rtx));
687 static int store_killed_in_insn PARAMS ((rtx, rtx));
688 static int store_killed_after PARAMS ((rtx, rtx, basic_block));
689 static int store_killed_before PARAMS ((rtx, rtx, basic_block));
690 static void build_store_vectors PARAMS ((void));
691 static void insert_insn_start_bb PARAMS ((rtx, basic_block));
692 static int insert_store PARAMS ((struct ls_expr *, edge));
693 static void replace_store_insn PARAMS ((rtx, rtx, basic_block));
694 static void delete_store PARAMS ((struct ls_expr *,
695 basic_block));
696 static void free_store_memory PARAMS ((void));
697 static void store_motion PARAMS ((void));
698 static void clear_modify_mem_tables PARAMS ((void));
699 static void free_modify_mem_tables PARAMS ((void));
700 \f
701 /* Entry point for global common subexpression elimination.
702 F is the first instruction in the function. */
703
704 int
705 gcse_main (f, file)
706 rtx f;
707 FILE *file;
708 {
709 int changed, pass;
710 /* Bytes used at start of pass. */
711 int initial_bytes_used;
712 /* Maximum number of bytes used by a pass. */
713 int max_pass_bytes;
714 /* Point to release obstack data from for each pass. */
715 char *gcse_obstack_bottom;
716
717 /* Insertion of instructions on edges can create new basic blocks; we
718 need the original basic block count so that we can properly deallocate
719 arrays sized on the number of basic blocks originally in the cfg. */
720 int orig_bb_count;
721 /* We do not construct an accurate cfg in functions which call
722 setjmp, so just punt to be safe. */
723 if (current_function_calls_setjmp)
724 return 0;
725
726 /* Assume that we do not need to run jump optimizations after gcse. */
727 run_jump_opt_after_gcse = 0;
728
729 /* For calling dump_foo fns from gdb. */
730 debug_stderr = stderr;
731 gcse_file = file;
732
733 /* Identify the basic block information for this function, including
734 successors and predecessors. */
735 max_gcse_regno = max_reg_num ();
736
737 if (file)
738 dump_flow_info (file);
739
740 orig_bb_count = n_basic_blocks;
741 /* Return if there's nothing to do. */
742 if (n_basic_blocks <= 1)
743 return 0;
744
745 /* Trying to perform global optimizations on flow graphs which have
746 a high connectivity will take a long time and is unlikely to be
747 particularly useful.
748
749 In normal circumstances a cfg should have about twice as many edges
750 as blocks. But we do not want to punish small functions which have
751 a couple switch statements. So we require a relatively large number
752 of basic blocks and the ratio of edges to blocks to be high. */
753 if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
754 {
755 if (warn_disabled_optimization)
756 warning ("GCSE disabled: %d > 1000 basic blocks and %d >= 20 edges/basic block",
757 n_basic_blocks, n_edges / n_basic_blocks);
758 return 0;
759 }
760
761 /* If allocating memory for the cprop bitmap would take up too much
762 storage it's better just to disable the optimization. */
763 if ((n_basic_blocks
764 * SBITMAP_SET_SIZE (max_gcse_regno)
765 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
766 {
767 if (warn_disabled_optimization)
768 warning ("GCSE disabled: %d basic blocks and %d registers",
769 n_basic_blocks, max_gcse_regno);
770
771 return 0;
772 }
773
774 /* See what modes support reg/reg copy operations. */
775 if (! can_copy_init_p)
776 {
777 compute_can_copy ();
778 can_copy_init_p = 1;
779 }
780
781 gcc_obstack_init (&gcse_obstack);
782 bytes_used = 0;
783
784 /* We need alias. */
785 init_alias_analysis ();
786 /* Record where pseudo-registers are set. This data is kept accurate
787 during each pass. ??? We could also record hard-reg information here
788 [since it's unchanging], however it is currently done during hash table
789 computation.
790
791 It may be tempting to compute MEM set information here too, but MEM sets
792 will be subject to code motion one day and thus we need to compute
793 information about memory sets when we build the hash tables. */
794
795 alloc_reg_set_mem (max_gcse_regno);
796 compute_sets (f);
797
798 pass = 0;
799 initial_bytes_used = bytes_used;
800 max_pass_bytes = 0;
801 gcse_obstack_bottom = gcse_alloc (1);
802 changed = 1;
803 while (changed && pass < MAX_GCSE_PASSES)
804 {
805 changed = 0;
806 if (file)
807 fprintf (file, "GCSE pass %d\n\n", pass + 1);
808
809 /* Initialize bytes_used to the space for the pred/succ lists,
810 and the reg_set_table data. */
811 bytes_used = initial_bytes_used;
812
813 /* Each pass may create new registers, so recalculate each time. */
814 max_gcse_regno = max_reg_num ();
815
816 alloc_gcse_mem (f);
817
818 /* Don't allow constant propagation to modify jumps
819 during this pass. */
820 changed = one_cprop_pass (pass + 1, 0);
821
822 if (optimize_size)
823 changed |= one_classic_gcse_pass (pass + 1);
824 else
825 {
826 changed |= one_pre_gcse_pass (pass + 1);
827 /* We may have just created new basic blocks. Release and
828 recompute various things which are sized on the number of
829 basic blocks. */
830 if (changed)
831 {
832 free_modify_mem_tables ();
833 modify_mem_list
834 = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
835 canon_modify_mem_list
836 = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
837 memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx *));
838 memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx *));
839 orig_bb_count = n_basic_blocks;
840 }
841 free_reg_set_mem ();
842 alloc_reg_set_mem (max_reg_num ());
843 compute_sets (f);
844 run_jump_opt_after_gcse = 1;
845 }
846
847 if (max_pass_bytes < bytes_used)
848 max_pass_bytes = bytes_used;
849
850 /* Free up memory, then reallocate for code hoisting. We can
851 not re-use the existing allocated memory because the tables
852 will not have info for the insns or registers created by
853 partial redundancy elimination. */
854 free_gcse_mem ();
855
856 /* It does not make sense to run code hoisting unless we optimizing
857 for code size -- it rarely makes programs faster, and can make
858 them bigger if we did partial redundancy elimination (when optimizing
859 for space, we use a classic gcse algorithm instead of partial
860 redundancy algorithms). */
861 if (optimize_size)
862 {
863 max_gcse_regno = max_reg_num ();
864 alloc_gcse_mem (f);
865 changed |= one_code_hoisting_pass ();
866 free_gcse_mem ();
867
868 if (max_pass_bytes < bytes_used)
869 max_pass_bytes = bytes_used;
870 }
871
872 if (file)
873 {
874 fprintf (file, "\n");
875 fflush (file);
876 }
877
878 obstack_free (&gcse_obstack, gcse_obstack_bottom);
879 pass++;
880 }
881
882 /* Do one last pass of copy propagation, including cprop into
883 conditional jumps. */
884
885 max_gcse_regno = max_reg_num ();
886 alloc_gcse_mem (f);
887 /* This time, go ahead and allow cprop to alter jumps. */
888 one_cprop_pass (pass + 1, 1);
889 free_gcse_mem ();
890
891 if (file)
892 {
893 fprintf (file, "GCSE of %s: %d basic blocks, ",
894 current_function_name, n_basic_blocks);
895 fprintf (file, "%d pass%s, %d bytes\n\n",
896 pass, pass > 1 ? "es" : "", max_pass_bytes);
897 }
898
899 obstack_free (&gcse_obstack, NULL);
900 free_reg_set_mem ();
901 /* We are finished with alias. */
902 end_alias_analysis ();
903 allocate_reg_info (max_reg_num (), FALSE, FALSE);
904
905 if (!optimize_size && flag_gcse_sm)
906 store_motion ();
907 /* Record where pseudo-registers are set. */
908 return run_jump_opt_after_gcse;
909 }
910 \f
911 /* Misc. utilities. */
912
913 /* Compute which modes support reg/reg copy operations. */
914
915 static void
916 compute_can_copy ()
917 {
918 int i;
919 #ifndef AVOID_CCMODE_COPIES
920 rtx reg,insn;
921 #endif
922 memset (can_copy_p, 0, NUM_MACHINE_MODES);
923
924 start_sequence ();
925 for (i = 0; i < NUM_MACHINE_MODES; i++)
926 if (GET_MODE_CLASS (i) == MODE_CC)
927 {
928 #ifdef AVOID_CCMODE_COPIES
929 can_copy_p[i] = 0;
930 #else
931 reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
932 insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
933 if (recog (PATTERN (insn), insn, NULL) >= 0)
934 can_copy_p[i] = 1;
935 #endif
936 }
937 else
938 can_copy_p[i] = 1;
939
940 end_sequence ();
941 }
942 \f
943 /* Cover function to xmalloc to record bytes allocated. */
944
945 static char *
946 gmalloc (size)
947 unsigned int size;
948 {
949 bytes_used += size;
950 return xmalloc (size);
951 }
952
953 /* Cover function to xrealloc.
954 We don't record the additional size since we don't know it.
955 It won't affect memory usage stats much anyway. */
956
957 static char *
958 grealloc (ptr, size)
959 char *ptr;
960 unsigned int size;
961 {
962 return xrealloc (ptr, size);
963 }
964
965 /* Cover function to obstack_alloc.
966 We don't need to record the bytes allocated here since
967 obstack_chunk_alloc is set to gmalloc. */
968
969 static char *
970 gcse_alloc (size)
971 unsigned long size;
972 {
973 return (char *) obstack_alloc (&gcse_obstack, size);
974 }
975
976 /* Allocate memory for the cuid mapping array,
977 and reg/memory set tracking tables.
978
979 This is called at the start of each pass. */
980
981 static void
982 alloc_gcse_mem (f)
983 rtx f;
984 {
985 int i,n;
986 rtx insn;
987
988 /* Find the largest UID and create a mapping from UIDs to CUIDs.
989 CUIDs are like UIDs except they increase monotonically, have no gaps,
990 and only apply to real insns. */
991
992 max_uid = get_max_uid ();
993 n = (max_uid + 1) * sizeof (int);
994 uid_cuid = (int *) gmalloc (n);
995 memset ((char *) uid_cuid, 0, n);
996 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
997 {
998 if (INSN_P (insn))
999 uid_cuid[INSN_UID (insn)] = i++;
1000 else
1001 uid_cuid[INSN_UID (insn)] = i;
1002 }
1003
1004 /* Create a table mapping cuids to insns. */
1005
1006 max_cuid = i;
1007 n = (max_cuid + 1) * sizeof (rtx);
1008 cuid_insn = (rtx *) gmalloc (n);
1009 memset ((char *) cuid_insn, 0, n);
1010 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
1011 if (INSN_P (insn))
1012 CUID_INSN (i++) = insn;
1013
1014 /* Allocate vars to track sets of regs. */
1015 reg_set_bitmap = BITMAP_XMALLOC ();
1016
1017 /* Allocate vars to track sets of regs, memory per block. */
1018 reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
1019 max_gcse_regno);
1020 /* Allocate array to keep a list of insns which modify memory in each
1021 basic block. */
1022 modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
1023 canon_modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx *));
1024 memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx *));
1025 memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx *));
1026 modify_mem_list_set = BITMAP_XMALLOC ();
1027 canon_modify_mem_list_set = BITMAP_XMALLOC ();
1028 }
1029
1030 /* Free memory allocated by alloc_gcse_mem. */
1031
1032 static void
1033 free_gcse_mem ()
1034 {
1035 free (uid_cuid);
1036 free (cuid_insn);
1037
1038 BITMAP_XFREE (reg_set_bitmap);
1039
1040 sbitmap_vector_free (reg_set_in_block);
1041 free_modify_mem_tables ();
1042 BITMAP_XFREE (modify_mem_list_set);
1043 BITMAP_XFREE (canon_modify_mem_list_set);
1044 }
1045
1046 /* Many of the global optimization algorithms work by solving dataflow
1047 equations for various expressions. Initially, some local value is
1048 computed for each expression in each block. Then, the values across the
1049 various blocks are combined (by following flow graph edges) to arrive at
1050 global values. Conceptually, each set of equations is independent. We
1051 may therefore solve all the equations in parallel, solve them one at a
1052 time, or pick any intermediate approach.
1053
1054 When you're going to need N two-dimensional bitmaps, each X (say, the
1055 number of blocks) by Y (say, the number of expressions), call this
1056 function. It's not important what X and Y represent; only that Y
1057 correspond to the things that can be done in parallel. This function will
1058 return an appropriate chunking factor C; you should solve C sets of
1059 equations in parallel. By going through this function, we can easily
1060 trade space against time; by solving fewer equations in parallel we use
1061 less space. */
1062
1063 static int
1064 get_bitmap_width (n, x, y)
1065 int n;
1066 int x;
1067 int y;
1068 {
1069 /* It's not really worth figuring out *exactly* how much memory will
1070 be used by a particular choice. The important thing is to get
1071 something approximately right. */
1072 size_t max_bitmap_memory = 10 * 1024 * 1024;
1073
1074 /* The number of bytes we'd use for a single column of minimum
1075 width. */
1076 size_t column_size = n * x * sizeof (SBITMAP_ELT_TYPE);
1077
1078 /* Often, it's reasonable just to solve all the equations in
1079 parallel. */
1080 if (column_size * SBITMAP_SET_SIZE (y) <= max_bitmap_memory)
1081 return y;
1082
1083 /* Otherwise, pick the largest width we can, without going over the
1084 limit. */
1085 return SBITMAP_ELT_BITS * ((max_bitmap_memory + column_size - 1)
1086 / column_size);
1087 }
1088 \f
1089 /* Compute the local properties of each recorded expression.
1090
1091 Local properties are those that are defined by the block, irrespective of
1092 other blocks.
1093
1094 An expression is transparent in a block if its operands are not modified
1095 in the block.
1096
1097 An expression is computed (locally available) in a block if it is computed
1098 at least once and expression would contain the same value if the
1099 computation was moved to the end of the block.
1100
1101 An expression is locally anticipatable in a block if it is computed at
1102 least once and expression would contain the same value if the computation
1103 was moved to the beginning of the block.
1104
1105 We call this routine for cprop, pre and code hoisting. They all compute
1106 basically the same information and thus can easily share this code.
1107
1108 TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
1109 properties. If NULL, then it is not necessary to compute or record that
1110 particular property.
1111
1112 SETP controls which hash table to look at. If zero, this routine looks at
1113 the expr hash table; if nonzero this routine looks at the set hash table.
1114 Additionally, TRANSP is computed as ~TRANSP, since this is really cprop's
1115 ABSALTERED. */
1116
1117 static void
1118 compute_local_properties (transp, comp, antloc, setp)
1119 sbitmap *transp;
1120 sbitmap *comp;
1121 sbitmap *antloc;
1122 int setp;
1123 {
1124 unsigned int i, hash_table_size;
1125 struct expr **hash_table;
1126
1127 /* Initialize any bitmaps that were passed in. */
1128 if (transp)
1129 {
1130 if (setp)
1131 sbitmap_vector_zero (transp, n_basic_blocks);
1132 else
1133 sbitmap_vector_ones (transp, n_basic_blocks);
1134 }
1135
1136 if (comp)
1137 sbitmap_vector_zero (comp, n_basic_blocks);
1138 if (antloc)
1139 sbitmap_vector_zero (antloc, n_basic_blocks);
1140
1141 /* We use the same code for cprop, pre and hoisting. For cprop
1142 we care about the set hash table, for pre and hoisting we
1143 care about the expr hash table. */
1144 hash_table_size = setp ? set_hash_table_size : expr_hash_table_size;
1145 hash_table = setp ? set_hash_table : expr_hash_table;
1146
1147 for (i = 0; i < hash_table_size; i++)
1148 {
1149 struct expr *expr;
1150
1151 for (expr = hash_table[i]; expr != NULL; expr = expr->next_same_hash)
1152 {
1153 int indx = expr->bitmap_index;
1154 struct occr *occr;
1155
1156 /* The expression is transparent in this block if it is not killed.
1157 We start by assuming all are transparent [none are killed], and
1158 then reset the bits for those that are. */
1159 if (transp)
1160 compute_transp (expr->expr, indx, transp, setp);
1161
1162 /* The occurrences recorded in antic_occr are exactly those that
1163 we want to set to non-zero in ANTLOC. */
1164 if (antloc)
1165 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
1166 {
1167 SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);
1168
1169 /* While we're scanning the table, this is a good place to
1170 initialize this. */
1171 occr->deleted_p = 0;
1172 }
1173
1174 /* The occurrences recorded in avail_occr are exactly those that
1175 we want to set to non-zero in COMP. */
1176 if (comp)
1177 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
1178 {
1179 SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);
1180
1181 /* While we're scanning the table, this is a good place to
1182 initialize this. */
1183 occr->copied_p = 0;
1184 }
1185
1186 /* While we're scanning the table, this is a good place to
1187 initialize this. */
1188 expr->reaching_reg = 0;
1189 }
1190 }
1191 }
1192 \f
1193 /* Register set information.
1194
1195 `reg_set_table' records where each register is set or otherwise
1196 modified. */
1197
1198 static struct obstack reg_set_obstack;
1199
1200 static void
1201 alloc_reg_set_mem (n_regs)
1202 int n_regs;
1203 {
1204 unsigned int n;
1205
1206 reg_set_table_size = n_regs + REG_SET_TABLE_SLOP;
1207 n = reg_set_table_size * sizeof (struct reg_set *);
1208 reg_set_table = (struct reg_set **) gmalloc (n);
1209 memset ((char *) reg_set_table, 0, n);
1210
1211 gcc_obstack_init (&reg_set_obstack);
1212 }
1213
1214 static void
1215 free_reg_set_mem ()
1216 {
1217 free (reg_set_table);
1218 obstack_free (&reg_set_obstack, NULL);
1219 }
1220
1221 /* Record REGNO in the reg_set table. */
1222
1223 static void
1224 record_one_set (regno, insn)
1225 int regno;
1226 rtx insn;
1227 {
1228 /* Allocate a new reg_set element and link it onto the list. */
1229 struct reg_set *new_reg_info;
1230
1231 /* If the table isn't big enough, enlarge it. */
1232 if (regno >= reg_set_table_size)
1233 {
1234 int new_size = regno + REG_SET_TABLE_SLOP;
1235
1236 reg_set_table
1237 = (struct reg_set **) grealloc ((char *) reg_set_table,
1238 new_size * sizeof (struct reg_set *));
1239 memset ((char *) (reg_set_table + reg_set_table_size), 0,
1240 (new_size - reg_set_table_size) * sizeof (struct reg_set *));
1241 reg_set_table_size = new_size;
1242 }
1243
1244 new_reg_info = (struct reg_set *) obstack_alloc (&reg_set_obstack,
1245 sizeof (struct reg_set));
1246 bytes_used += sizeof (struct reg_set);
1247 new_reg_info->insn = insn;
1248 new_reg_info->next = reg_set_table[regno];
1249 reg_set_table[regno] = new_reg_info;
1250 }
1251
1252 /* Called from compute_sets via note_stores to handle one SET or CLOBBER in
1253 an insn. The DATA is really the instruction in which the SET is
1254 occurring. */
1255
1256 static void
1257 record_set_info (dest, setter, data)
1258 rtx dest, setter ATTRIBUTE_UNUSED;
1259 void *data;
1260 {
1261 rtx record_set_insn = (rtx) data;
1262
1263 if (GET_CODE (dest) == REG && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
1264 record_one_set (REGNO (dest), record_set_insn);
1265 }
1266
1267 /* Scan the function and record each set of each pseudo-register.
1268
1269 This is called once, at the start of the gcse pass. See the comments for
1270 `reg_set_table' for further documenation. */
1271
1272 static void
1273 compute_sets (f)
1274 rtx f;
1275 {
1276 rtx insn;
1277
1278 for (insn = f; insn != 0; insn = NEXT_INSN (insn))
1279 if (INSN_P (insn))
1280 note_stores (PATTERN (insn), record_set_info, insn);
1281 }
1282 \f
1283 /* Hash table support. */
1284
1285 /* For each register, the cuid of the first/last insn in the block
1286 that set it, or -1 if not set. */
1287 #define NEVER_SET -1
1288
1289 struct reg_avail_info
1290 {
1291 int last_bb;
1292 int first_set;
1293 int last_set;
1294 };
1295
1296 static struct reg_avail_info *reg_avail_info;
1297 static int current_bb;
1298
1299
1300 /* See whether X, the source of a set, is something we want to consider for
1301 GCSE. */
1302
1303 static int
1304 want_to_gcse_p (x)
1305 rtx x;
1306 {
1307 static rtx test_insn = 0;
1308 int num_clobbers = 0;
1309 int icode;
1310
1311 switch (GET_CODE (x))
1312 {
1313 case REG:
1314 case SUBREG:
1315 case CONST_INT:
1316 case CONST_DOUBLE:
1317 case CALL:
1318 return 0;
1319
1320 default:
1321 break;
1322 }
1323
1324 /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */
1325 if (general_operand (x, GET_MODE (x)))
1326 return 1;
1327 else if (GET_MODE (x) == VOIDmode)
1328 return 0;
1329
1330 /* Otherwise, check if we can make a valid insn from it. First initialize
1331 our test insn if we haven't already. */
1332 if (test_insn == 0)
1333 {
1334 test_insn
1335 = make_insn_raw (gen_rtx_SET (VOIDmode,
1336 gen_rtx_REG (word_mode,
1337 FIRST_PSEUDO_REGISTER * 2),
1338 const0_rtx));
1339 NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
1340 ggc_add_rtx_root (&test_insn, 1);
1341 }
1342
1343 /* Now make an insn like the one we would make when GCSE'ing and see if
1344 valid. */
1345 PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
1346 SET_SRC (PATTERN (test_insn)) = x;
1347 return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0
1348 && (num_clobbers == 0 || ! added_clobbers_hard_reg_p (icode)));
1349 }
1350
1351 /* Return non-zero if the operands of expression X are unchanged from the
1352 start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
1353 or from INSN to the end of INSN's basic block (if AVAIL_P != 0). */
1354
1355 static int
1356 oprs_unchanged_p (x, insn, avail_p)
1357 rtx x, insn;
1358 int avail_p;
1359 {
1360 int i, j;
1361 enum rtx_code code;
1362 const char *fmt;
1363
1364 if (x == 0)
1365 return 1;
1366
1367 code = GET_CODE (x);
1368 switch (code)
1369 {
1370 case REG:
1371 {
1372 struct reg_avail_info *info = &reg_avail_info[REGNO (x)];
1373
1374 if (info->last_bb != current_bb)
1375 return 1;
1376 if (avail_p)
1377 return info->last_set < INSN_CUID (insn);
1378 else
1379 return info->first_set >= INSN_CUID (insn);
1380 }
1381
1382 case MEM:
1383 if (load_killed_in_block_p (BASIC_BLOCK (current_bb), INSN_CUID (insn),
1384 x, avail_p))
1385 return 0;
1386 else
1387 return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);
1388
1389 case PRE_DEC:
1390 case PRE_INC:
1391 case POST_DEC:
1392 case POST_INC:
1393 case PRE_MODIFY:
1394 case POST_MODIFY:
1395 return 0;
1396
1397 case PC:
1398 case CC0: /*FIXME*/
1399 case CONST:
1400 case CONST_INT:
1401 case CONST_DOUBLE:
1402 case SYMBOL_REF:
1403 case LABEL_REF:
1404 case ADDR_VEC:
1405 case ADDR_DIFF_VEC:
1406 return 1;
1407
1408 default:
1409 break;
1410 }
1411
1412 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1413 {
1414 if (fmt[i] == 'e')
1415 {
1416 /* If we are about to do the last recursive call needed at this
1417 level, change it into iteration. This function is called enough
1418 to be worth it. */
1419 if (i == 0)
1420 return oprs_unchanged_p (XEXP (x, i), insn, avail_p);
1421
1422 else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
1423 return 0;
1424 }
1425 else if (fmt[i] == 'E')
1426 for (j = 0; j < XVECLEN (x, i); j++)
1427 if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
1428 return 0;
1429 }
1430
1431 return 1;
1432 }
1433
1434 /* Used for communication between mems_conflict_for_gcse_p and
1435 load_killed_in_block_p. Nonzero if mems_conflict_for_gcse_p finds a
1436 conflict between two memory references. */
1437 static int gcse_mems_conflict_p;
1438
1439 /* Used for communication between mems_conflict_for_gcse_p and
1440 load_killed_in_block_p. A memory reference for a load instruction,
1441 mems_conflict_for_gcse_p will see if a memory store conflicts with
1442 this memory load. */
1443 static rtx gcse_mem_operand;
1444
1445 /* DEST is the output of an instruction. If it is a memory reference, and
1446 possibly conflicts with the load found in gcse_mem_operand, then set
1447 gcse_mems_conflict_p to a nonzero value. */
1448
1449 static void
1450 mems_conflict_for_gcse_p (dest, setter, data)
1451 rtx dest, setter ATTRIBUTE_UNUSED;
1452 void *data ATTRIBUTE_UNUSED;
1453 {
1454 while (GET_CODE (dest) == SUBREG
1455 || GET_CODE (dest) == ZERO_EXTRACT
1456 || GET_CODE (dest) == SIGN_EXTRACT
1457 || GET_CODE (dest) == STRICT_LOW_PART)
1458 dest = XEXP (dest, 0);
1459
1460 /* If DEST is not a MEM, then it will not conflict with the load. Note
1461 that function calls are assumed to clobber memory, but are handled
1462 elsewhere. */
1463 if (GET_CODE (dest) != MEM)
1464 return;
1465
1466 /* If we are setting a MEM in our list of specially recognized MEMs,
1467 don't mark as killed this time. */
1468
1469 if (dest == gcse_mem_operand && pre_ldst_mems != NULL)
1470 {
1471 if (!find_rtx_in_ldst (dest))
1472 gcse_mems_conflict_p = 1;
1473 return;
1474 }
1475
1476 if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
1477 rtx_addr_varies_p))
1478 gcse_mems_conflict_p = 1;
1479 }
1480
1481 /* Return nonzero if the expression in X (a memory reference) is killed
1482 in block BB before or after the insn with the CUID in UID_LIMIT.
1483 AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
1484 before UID_LIMIT.
1485
1486 To check the entire block, set UID_LIMIT to max_uid + 1 and
1487 AVAIL_P to 0. */
1488
1489 static int
1490 load_killed_in_block_p (bb, uid_limit, x, avail_p)
1491 basic_block bb;
1492 int uid_limit;
1493 rtx x;
1494 int avail_p;
1495 {
1496 rtx list_entry = modify_mem_list[bb->index];
1497 while (list_entry)
1498 {
1499 rtx setter;
1500 /* Ignore entries in the list that do not apply. */
1501 if ((avail_p
1502 && INSN_CUID (XEXP (list_entry, 0)) < uid_limit)
1503 || (! avail_p
1504 && INSN_CUID (XEXP (list_entry, 0)) > uid_limit))
1505 {
1506 list_entry = XEXP (list_entry, 1);
1507 continue;
1508 }
1509
1510 setter = XEXP (list_entry, 0);
1511
1512 /* If SETTER is a call everything is clobbered. Note that calls
1513 to pure functions are never put on the list, so we need not
1514 worry about them. */
1515 if (GET_CODE (setter) == CALL_INSN)
1516 return 1;
1517
1518 /* SETTER must be an INSN of some kind that sets memory. Call
1519 note_stores to examine each hunk of memory that is modified.
1520
1521 The note_stores interface is pretty limited, so we have to
1522 communicate via global variables. Yuk. */
1523 gcse_mem_operand = x;
1524 gcse_mems_conflict_p = 0;
1525 note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
1526 if (gcse_mems_conflict_p)
1527 return 1;
1528 list_entry = XEXP (list_entry, 1);
1529 }
1530 return 0;
1531 }
1532
1533 /* Return non-zero if the operands of expression X are unchanged from
1534 the start of INSN's basic block up to but not including INSN. */
1535
1536 static int
1537 oprs_anticipatable_p (x, insn)
1538 rtx x, insn;
1539 {
1540 return oprs_unchanged_p (x, insn, 0);
1541 }
1542
1543 /* Return non-zero if the operands of expression X are unchanged from
1544 INSN to the end of INSN's basic block. */
1545
1546 static int
1547 oprs_available_p (x, insn)
1548 rtx x, insn;
1549 {
1550 return oprs_unchanged_p (x, insn, 1);
1551 }
1552
1553 /* Hash expression X.
1554
1555 MODE is only used if X is a CONST_INT. DO_NOT_RECORD_P is a boolean
1556 indicating if a volatile operand is found or if the expression contains
1557 something we don't want to insert in the table.
1558
1559 ??? One might want to merge this with canon_hash. Later. */
1560
1561 static unsigned int
1562 hash_expr (x, mode, do_not_record_p, hash_table_size)
1563 rtx x;
1564 enum machine_mode mode;
1565 int *do_not_record_p;
1566 int hash_table_size;
1567 {
1568 unsigned int hash;
1569
1570 *do_not_record_p = 0;
1571
1572 hash = hash_expr_1 (x, mode, do_not_record_p);
1573 return hash % hash_table_size;
1574 }
1575
1576 /* Hash a string. Just add its bytes up. */
1577
1578 static inline unsigned
1579 hash_string_1 (ps)
1580 const char *ps;
1581 {
1582 unsigned hash = 0;
1583 const unsigned char *p = (const unsigned char *)ps;
1584
1585 if (p)
1586 while (*p)
1587 hash += *p++;
1588
1589 return hash;
1590 }
1591
1592 /* Subroutine of hash_expr to do the actual work. */
1593
1594 static unsigned int
1595 hash_expr_1 (x, mode, do_not_record_p)
1596 rtx x;
1597 enum machine_mode mode;
1598 int *do_not_record_p;
1599 {
1600 int i, j;
1601 unsigned hash = 0;
1602 enum rtx_code code;
1603 const char *fmt;
1604
1605 /* Used to turn recursion into iteration. We can't rely on GCC's
1606 tail-recursion eliminatio since we need to keep accumulating values
1607 in HASH. */
1608
1609 if (x == 0)
1610 return hash;
1611
1612 repeat:
1613 code = GET_CODE (x);
1614 switch (code)
1615 {
1616 case REG:
1617 hash += ((unsigned int) REG << 7) + REGNO (x);
1618 return hash;
1619
1620 case CONST_INT:
1621 hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode
1622 + (unsigned int) INTVAL (x));
1623 return hash;
1624
1625 case CONST_DOUBLE:
1626 /* This is like the general case, except that it only counts
1627 the integers representing the constant. */
1628 hash += (unsigned int) code + (unsigned int) GET_MODE (x);
1629 if (GET_MODE (x) != VOIDmode)
1630 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
1631 hash += (unsigned int) XWINT (x, i);
1632 else
1633 hash += ((unsigned int) CONST_DOUBLE_LOW (x)
1634 + (unsigned int) CONST_DOUBLE_HIGH (x));
1635 return hash;
1636
1637 /* Assume there is only one rtx object for any given label. */
1638 case LABEL_REF:
1639 /* We don't hash on the address of the CODE_LABEL to avoid bootstrap
1640 differences and differences between each stage's debugging dumps. */
1641 hash += (((unsigned int) LABEL_REF << 7)
1642 + CODE_LABEL_NUMBER (XEXP (x, 0)));
1643 return hash;
1644
1645 case SYMBOL_REF:
1646 {
1647 /* Don't hash on the symbol's address to avoid bootstrap differences.
1648 Different hash values may cause expressions to be recorded in
1649 different orders and thus different registers to be used in the
1650 final assembler. This also avoids differences in the dump files
1651 between various stages. */
1652 unsigned int h = 0;
1653 const unsigned char *p = (const unsigned char *) XSTR (x, 0);
1654
1655 while (*p)
1656 h += (h << 7) + *p++; /* ??? revisit */
1657
1658 hash += ((unsigned int) SYMBOL_REF << 7) + h;
1659 return hash;
1660 }
1661
1662 case MEM:
1663 if (MEM_VOLATILE_P (x))
1664 {
1665 *do_not_record_p = 1;
1666 return 0;
1667 }
1668
1669 hash += (unsigned int) MEM;
1670 hash += MEM_ALIAS_SET (x);
1671 x = XEXP (x, 0);
1672 goto repeat;
1673
1674 case PRE_DEC:
1675 case PRE_INC:
1676 case POST_DEC:
1677 case POST_INC:
1678 case PC:
1679 case CC0:
1680 case CALL:
1681 case UNSPEC_VOLATILE:
1682 *do_not_record_p = 1;
1683 return 0;
1684
1685 case ASM_OPERANDS:
1686 if (MEM_VOLATILE_P (x))
1687 {
1688 *do_not_record_p = 1;
1689 return 0;
1690 }
1691 else
1692 {
1693 /* We don't want to take the filename and line into account. */
1694 hash += (unsigned) code + (unsigned) GET_MODE (x)
1695 + hash_string_1 (ASM_OPERANDS_TEMPLATE (x))
1696 + hash_string_1 (ASM_OPERANDS_OUTPUT_CONSTRAINT (x))
1697 + (unsigned) ASM_OPERANDS_OUTPUT_IDX (x);
1698
1699 if (ASM_OPERANDS_INPUT_LENGTH (x))
1700 {
1701 for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
1702 {
1703 hash += (hash_expr_1 (ASM_OPERANDS_INPUT (x, i),
1704 GET_MODE (ASM_OPERANDS_INPUT (x, i)),
1705 do_not_record_p)
1706 + hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT
1707 (x, i)));
1708 }
1709
1710 hash += hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0));
1711 x = ASM_OPERANDS_INPUT (x, 0);
1712 mode = GET_MODE (x);
1713 goto repeat;
1714 }
1715 return hash;
1716 }
1717
1718 default:
1719 break;
1720 }
1721
1722 hash += (unsigned) code + (unsigned) GET_MODE (x);
1723 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1724 {
1725 if (fmt[i] == 'e')
1726 {
1727 /* If we are about to do the last recursive call
1728 needed at this level, change it into iteration.
1729 This function is called enough to be worth it. */
1730 if (i == 0)
1731 {
1732 x = XEXP (x, i);
1733 goto repeat;
1734 }
1735
1736 hash += hash_expr_1 (XEXP (x, i), 0, do_not_record_p);
1737 if (*do_not_record_p)
1738 return 0;
1739 }
1740
1741 else if (fmt[i] == 'E')
1742 for (j = 0; j < XVECLEN (x, i); j++)
1743 {
1744 hash += hash_expr_1 (XVECEXP (x, i, j), 0, do_not_record_p);
1745 if (*do_not_record_p)
1746 return 0;
1747 }
1748
1749 else if (fmt[i] == 's')
1750 hash += hash_string_1 (XSTR (x, i));
1751 else if (fmt[i] == 'i')
1752 hash += (unsigned int) XINT (x, i);
1753 else
1754 abort ();
1755 }
1756
1757 return hash;
1758 }
1759
1760 /* Hash a set of register REGNO.
1761
1762 Sets are hashed on the register that is set. This simplifies the PRE copy
1763 propagation code.
1764
1765 ??? May need to make things more elaborate. Later, as necessary. */
1766
1767 static unsigned int
1768 hash_set (regno, hash_table_size)
1769 int regno;
1770 int hash_table_size;
1771 {
1772 unsigned int hash;
1773
1774 hash = regno;
1775 return hash % hash_table_size;
1776 }
1777
1778 /* Return non-zero if exp1 is equivalent to exp2.
1779 ??? Borrowed from cse.c. Might want to remerge with cse.c. Later. */
1780
1781 static int
1782 expr_equiv_p (x, y)
1783 rtx x, y;
1784 {
1785 int i, j;
1786 enum rtx_code code;
1787 const char *fmt;
1788
1789 if (x == y)
1790 return 1;
1791
1792 if (x == 0 || y == 0)
1793 return x == y;
1794
1795 code = GET_CODE (x);
1796 if (code != GET_CODE (y))
1797 return 0;
1798
1799 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
1800 if (GET_MODE (x) != GET_MODE (y))
1801 return 0;
1802
1803 switch (code)
1804 {
1805 case PC:
1806 case CC0:
1807 return x == y;
1808
1809 case CONST_INT:
1810 return INTVAL (x) == INTVAL (y);
1811
1812 case LABEL_REF:
1813 return XEXP (x, 0) == XEXP (y, 0);
1814
1815 case SYMBOL_REF:
1816 return XSTR (x, 0) == XSTR (y, 0);
1817
1818 case REG:
1819 return REGNO (x) == REGNO (y);
1820
1821 case MEM:
1822 /* Can't merge two expressions in different alias sets, since we can
1823 decide that the expression is transparent in a block when it isn't,
1824 due to it being set with the different alias set. */
1825 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
1826 return 0;
1827 break;
1828
1829 /* For commutative operations, check both orders. */
1830 case PLUS:
1831 case MULT:
1832 case AND:
1833 case IOR:
1834 case XOR:
1835 case NE:
1836 case EQ:
1837 return ((expr_equiv_p (XEXP (x, 0), XEXP (y, 0))
1838 && expr_equiv_p (XEXP (x, 1), XEXP (y, 1)))
1839 || (expr_equiv_p (XEXP (x, 0), XEXP (y, 1))
1840 && expr_equiv_p (XEXP (x, 1), XEXP (y, 0))));
1841
1842 case ASM_OPERANDS:
1843 /* We don't use the generic code below because we want to
1844 disregard filename and line numbers. */
1845
1846 /* A volatile asm isn't equivalent to any other. */
1847 if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
1848 return 0;
1849
1850 if (GET_MODE (x) != GET_MODE (y)
1851 || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y))
1852 || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x),
1853 ASM_OPERANDS_OUTPUT_CONSTRAINT (y))
1854 || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y)
1855 || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y))
1856 return 0;
1857
1858 if (ASM_OPERANDS_INPUT_LENGTH (x))
1859 {
1860 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
1861 if (! expr_equiv_p (ASM_OPERANDS_INPUT (x, i),
1862 ASM_OPERANDS_INPUT (y, i))
1863 || strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i),
1864 ASM_OPERANDS_INPUT_CONSTRAINT (y, i)))
1865 return 0;
1866 }
1867
1868 return 1;
1869
1870 default:
1871 break;
1872 }
1873
1874 /* Compare the elements. If any pair of corresponding elements
1875 fail to match, return 0 for the whole thing. */
1876
1877 fmt = GET_RTX_FORMAT (code);
1878 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1879 {
1880 switch (fmt[i])
1881 {
1882 case 'e':
1883 if (! expr_equiv_p (XEXP (x, i), XEXP (y, i)))
1884 return 0;
1885 break;
1886
1887 case 'E':
1888 if (XVECLEN (x, i) != XVECLEN (y, i))
1889 return 0;
1890 for (j = 0; j < XVECLEN (x, i); j++)
1891 if (! expr_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j)))
1892 return 0;
1893 break;
1894
1895 case 's':
1896 if (strcmp (XSTR (x, i), XSTR (y, i)))
1897 return 0;
1898 break;
1899
1900 case 'i':
1901 if (XINT (x, i) != XINT (y, i))
1902 return 0;
1903 break;
1904
1905 case 'w':
1906 if (XWINT (x, i) != XWINT (y, i))
1907 return 0;
1908 break;
1909
1910 case '0':
1911 break;
1912
1913 default:
1914 abort ();
1915 }
1916 }
1917
1918 return 1;
1919 }
1920
1921 /* Insert expression X in INSN in the hash table.
1922 If it is already present, record it as the last occurrence in INSN's
1923 basic block.
1924
1925 MODE is the mode of the value X is being stored into.
1926 It is only used if X is a CONST_INT.
1927
1928 ANTIC_P is non-zero if X is an anticipatable expression.
1929 AVAIL_P is non-zero if X is an available expression. */
1930
1931 static void
1932 insert_expr_in_table (x, mode, insn, antic_p, avail_p)
1933 rtx x;
1934 enum machine_mode mode;
1935 rtx insn;
1936 int antic_p, avail_p;
1937 {
1938 int found, do_not_record_p;
1939 unsigned int hash;
1940 struct expr *cur_expr, *last_expr = NULL;
1941 struct occr *antic_occr, *avail_occr;
1942 struct occr *last_occr = NULL;
1943
1944 hash = hash_expr (x, mode, &do_not_record_p, expr_hash_table_size);
1945
1946 /* Do not insert expression in table if it contains volatile operands,
1947 or if hash_expr determines the expression is something we don't want
1948 to or can't handle. */
1949 if (do_not_record_p)
1950 return;
1951
1952 cur_expr = expr_hash_table[hash];
1953 found = 0;
1954
1955 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1956 {
1957 /* If the expression isn't found, save a pointer to the end of
1958 the list. */
1959 last_expr = cur_expr;
1960 cur_expr = cur_expr->next_same_hash;
1961 }
1962
1963 if (! found)
1964 {
1965 cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
1966 bytes_used += sizeof (struct expr);
1967 if (expr_hash_table[hash] == NULL)
1968 /* This is the first pattern that hashed to this index. */
1969 expr_hash_table[hash] = cur_expr;
1970 else
1971 /* Add EXPR to end of this hash chain. */
1972 last_expr->next_same_hash = cur_expr;
1973
1974 /* Set the fields of the expr element. */
1975 cur_expr->expr = x;
1976 cur_expr->bitmap_index = n_exprs++;
1977 cur_expr->next_same_hash = NULL;
1978 cur_expr->antic_occr = NULL;
1979 cur_expr->avail_occr = NULL;
1980 }
1981
1982 /* Now record the occurrence(s). */
1983 if (antic_p)
1984 {
1985 antic_occr = cur_expr->antic_occr;
1986
1987 /* Search for another occurrence in the same basic block. */
1988 while (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
1989 {
1990 /* If an occurrence isn't found, save a pointer to the end of
1991 the list. */
1992 last_occr = antic_occr;
1993 antic_occr = antic_occr->next;
1994 }
1995
1996 if (antic_occr)
1997 /* Found another instance of the expression in the same basic block.
1998 Prefer the currently recorded one. We want the first one in the
1999 block and the block is scanned from start to end. */
2000 ; /* nothing to do */
2001 else
2002 {
2003 /* First occurrence of this expression in this basic block. */
2004 antic_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2005 bytes_used += sizeof (struct occr);
2006 /* First occurrence of this expression in any block? */
2007 if (cur_expr->antic_occr == NULL)
2008 cur_expr->antic_occr = antic_occr;
2009 else
2010 last_occr->next = antic_occr;
2011
2012 antic_occr->insn = insn;
2013 antic_occr->next = NULL;
2014 }
2015 }
2016
2017 if (avail_p)
2018 {
2019 avail_occr = cur_expr->avail_occr;
2020
2021 /* Search for another occurrence in the same basic block. */
2022 while (avail_occr && BLOCK_NUM (avail_occr->insn) != BLOCK_NUM (insn))
2023 {
2024 /* If an occurrence isn't found, save a pointer to the end of
2025 the list. */
2026 last_occr = avail_occr;
2027 avail_occr = avail_occr->next;
2028 }
2029
2030 if (avail_occr)
2031 /* Found another instance of the expression in the same basic block.
2032 Prefer this occurrence to the currently recorded one. We want
2033 the last one in the block and the block is scanned from start
2034 to end. */
2035 avail_occr->insn = insn;
2036 else
2037 {
2038 /* First occurrence of this expression in this basic block. */
2039 avail_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2040 bytes_used += sizeof (struct occr);
2041
2042 /* First occurrence of this expression in any block? */
2043 if (cur_expr->avail_occr == NULL)
2044 cur_expr->avail_occr = avail_occr;
2045 else
2046 last_occr->next = avail_occr;
2047
2048 avail_occr->insn = insn;
2049 avail_occr->next = NULL;
2050 }
2051 }
2052 }
2053
2054 /* Insert pattern X in INSN in the hash table.
2055 X is a SET of a reg to either another reg or a constant.
2056 If it is already present, record it as the last occurrence in INSN's
2057 basic block. */
2058
2059 static void
2060 insert_set_in_table (x, insn)
2061 rtx x;
2062 rtx insn;
2063 {
2064 int found;
2065 unsigned int hash;
2066 struct expr *cur_expr, *last_expr = NULL;
2067 struct occr *cur_occr, *last_occr = NULL;
2068
2069 if (GET_CODE (x) != SET
2070 || GET_CODE (SET_DEST (x)) != REG)
2071 abort ();
2072
2073 hash = hash_set (REGNO (SET_DEST (x)), set_hash_table_size);
2074
2075 cur_expr = set_hash_table[hash];
2076 found = 0;
2077
2078 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
2079 {
2080 /* If the expression isn't found, save a pointer to the end of
2081 the list. */
2082 last_expr = cur_expr;
2083 cur_expr = cur_expr->next_same_hash;
2084 }
2085
2086 if (! found)
2087 {
2088 cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
2089 bytes_used += sizeof (struct expr);
2090 if (set_hash_table[hash] == NULL)
2091 /* This is the first pattern that hashed to this index. */
2092 set_hash_table[hash] = cur_expr;
2093 else
2094 /* Add EXPR to end of this hash chain. */
2095 last_expr->next_same_hash = cur_expr;
2096
2097 /* Set the fields of the expr element.
2098 We must copy X because it can be modified when copy propagation is
2099 performed on its operands. */
2100 cur_expr->expr = copy_rtx (x);
2101 cur_expr->bitmap_index = n_sets++;
2102 cur_expr->next_same_hash = NULL;
2103 cur_expr->antic_occr = NULL;
2104 cur_expr->avail_occr = NULL;
2105 }
2106
2107 /* Now record the occurrence. */
2108 cur_occr = cur_expr->avail_occr;
2109
2110 /* Search for another occurrence in the same basic block. */
2111 while (cur_occr && BLOCK_NUM (cur_occr->insn) != BLOCK_NUM (insn))
2112 {
2113 /* If an occurrence isn't found, save a pointer to the end of
2114 the list. */
2115 last_occr = cur_occr;
2116 cur_occr = cur_occr->next;
2117 }
2118
2119 if (cur_occr)
2120 /* Found another instance of the expression in the same basic block.
2121 Prefer this occurrence to the currently recorded one. We want the
2122 last one in the block and the block is scanned from start to end. */
2123 cur_occr->insn = insn;
2124 else
2125 {
2126 /* First occurrence of this expression in this basic block. */
2127 cur_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2128 bytes_used += sizeof (struct occr);
2129
2130 /* First occurrence of this expression in any block? */
2131 if (cur_expr->avail_occr == NULL)
2132 cur_expr->avail_occr = cur_occr;
2133 else
2134 last_occr->next = cur_occr;
2135
2136 cur_occr->insn = insn;
2137 cur_occr->next = NULL;
2138 }
2139 }
2140
2141 /* Scan pattern PAT of INSN and add an entry to the hash table. If SET_P is
2142 non-zero, this is for the assignment hash table, otherwise it is for the
2143 expression hash table. */
2144
2145 static void
2146 hash_scan_set (pat, insn, set_p)
2147 rtx pat, insn;
2148 int set_p;
2149 {
2150 rtx src = SET_SRC (pat);
2151 rtx dest = SET_DEST (pat);
2152 rtx note;
2153
2154 if (GET_CODE (src) == CALL)
2155 hash_scan_call (src, insn);
2156
2157 else if (GET_CODE (dest) == REG)
2158 {
2159 unsigned int regno = REGNO (dest);
2160 rtx tmp;
2161
2162 /* If this is a single set and we are doing constant propagation,
2163 see if a REG_NOTE shows this equivalent to a constant. */
2164 if (set_p && (note = find_reg_equal_equiv_note (insn)) != 0
2165 && CONSTANT_P (XEXP (note, 0)))
2166 src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);
2167
2168 /* Only record sets of pseudo-regs in the hash table. */
2169 if (! set_p
2170 && regno >= FIRST_PSEUDO_REGISTER
2171 /* Don't GCSE something if we can't do a reg/reg copy. */
2172 && can_copy_p [GET_MODE (dest)]
2173 /* Is SET_SRC something we want to gcse? */
2174 && want_to_gcse_p (src)
2175 /* Don't CSE a nop. */
2176 && ! set_noop_p (pat)
2177 /* Don't GCSE if it has attached REG_EQUIV note.
2178 At this point this only function parameters should have
2179 REG_EQUIV notes and if the argument slot is used somewhere
2180 explicitly, it means address of parameter has been taken,
2181 so we should not extend the lifetime of the pseudo. */
2182 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
2183 || GET_CODE (XEXP (note, 0)) != MEM))
2184 {
2185 /* An expression is not anticipatable if its operands are
2186 modified before this insn or if this is not the only SET in
2187 this insn. */
2188 int antic_p = oprs_anticipatable_p (src, insn) && single_set (insn);
2189 /* An expression is not available if its operands are
2190 subsequently modified, including this insn. It's also not
2191 available if this is a branch, because we can't insert
2192 a set after the branch. */
2193 int avail_p = (oprs_available_p (src, insn)
2194 && ! JUMP_P (insn));
2195
2196 insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p);
2197 }
2198
2199 /* Record sets for constant/copy propagation. */
2200 else if (set_p
2201 && regno >= FIRST_PSEUDO_REGISTER
2202 && ((GET_CODE (src) == REG
2203 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2204 && can_copy_p [GET_MODE (dest)]
2205 && REGNO (src) != regno)
2206 || GET_CODE (src) == CONST_INT
2207 || GET_CODE (src) == SYMBOL_REF
2208 || GET_CODE (src) == CONST_DOUBLE)
2209 /* A copy is not available if its src or dest is subsequently
2210 modified. Here we want to search from INSN+1 on, but
2211 oprs_available_p searches from INSN on. */
2212 && (insn == BLOCK_END (BLOCK_NUM (insn))
2213 || ((tmp = next_nonnote_insn (insn)) != NULL_RTX
2214 && oprs_available_p (pat, tmp))))
2215 insert_set_in_table (pat, insn);
2216 }
2217 }
2218
2219 static void
2220 hash_scan_clobber (x, insn)
2221 rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
2222 {
2223 /* Currently nothing to do. */
2224 }
2225
2226 static void
2227 hash_scan_call (x, insn)
2228 rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
2229 {
2230 /* Currently nothing to do. */
2231 }
2232
2233 /* Process INSN and add hash table entries as appropriate.
2234
2235 Only available expressions that set a single pseudo-reg are recorded.
2236
2237 Single sets in a PARALLEL could be handled, but it's an extra complication
2238 that isn't dealt with right now. The trick is handling the CLOBBERs that
2239 are also in the PARALLEL. Later.
2240
2241 If SET_P is non-zero, this is for the assignment hash table,
2242 otherwise it is for the expression hash table.
2243 If IN_LIBCALL_BLOCK nonzero, we are in a libcall block, and should
2244 not record any expressions. */
2245
2246 static void
2247 hash_scan_insn (insn, set_p, in_libcall_block)
2248 rtx insn;
2249 int set_p;
2250 int in_libcall_block;
2251 {
2252 rtx pat = PATTERN (insn);
2253 int i;
2254
2255 if (in_libcall_block)
2256 return;
2257
2258 /* Pick out the sets of INSN and for other forms of instructions record
2259 what's been modified. */
2260
2261 if (GET_CODE (pat) == SET)
2262 hash_scan_set (pat, insn, set_p);
2263 else if (GET_CODE (pat) == PARALLEL)
2264 for (i = 0; i < XVECLEN (pat, 0); i++)
2265 {
2266 rtx x = XVECEXP (pat, 0, i);
2267
2268 if (GET_CODE (x) == SET)
2269 hash_scan_set (x, insn, set_p);
2270 else if (GET_CODE (x) == CLOBBER)
2271 hash_scan_clobber (x, insn);
2272 else if (GET_CODE (x) == CALL)
2273 hash_scan_call (x, insn);
2274 }
2275
2276 else if (GET_CODE (pat) == CLOBBER)
2277 hash_scan_clobber (pat, insn);
2278 else if (GET_CODE (pat) == CALL)
2279 hash_scan_call (pat, insn);
2280 }
2281
2282 static void
2283 dump_hash_table (file, name, table, table_size, total_size)
2284 FILE *file;
2285 const char *name;
2286 struct expr **table;
2287 int table_size, total_size;
2288 {
2289 int i;
2290 /* Flattened out table, so it's printed in proper order. */
2291 struct expr **flat_table;
2292 unsigned int *hash_val;
2293 struct expr *expr;
2294
2295 flat_table
2296 = (struct expr **) xcalloc (total_size, sizeof (struct expr *));
2297 hash_val = (unsigned int *) xmalloc (total_size * sizeof (unsigned int));
2298
2299 for (i = 0; i < table_size; i++)
2300 for (expr = table[i]; expr != NULL; expr = expr->next_same_hash)
2301 {
2302 flat_table[expr->bitmap_index] = expr;
2303 hash_val[expr->bitmap_index] = i;
2304 }
2305
2306 fprintf (file, "%s hash table (%d buckets, %d entries)\n",
2307 name, table_size, total_size);
2308
2309 for (i = 0; i < total_size; i++)
2310 if (flat_table[i] != 0)
2311 {
2312 expr = flat_table[i];
2313 fprintf (file, "Index %d (hash value %d)\n ",
2314 expr->bitmap_index, hash_val[i]);
2315 print_rtl (file, expr->expr);
2316 fprintf (file, "\n");
2317 }
2318
2319 fprintf (file, "\n");
2320
2321 free (flat_table);
2322 free (hash_val);
2323 }
2324
2325 /* Record register first/last/block set information for REGNO in INSN.
2326
2327 first_set records the first place in the block where the register
2328 is set and is used to compute "anticipatability".
2329
2330 last_set records the last place in the block where the register
2331 is set and is used to compute "availability".
2332
2333 last_bb records the block for which first_set and last_set are
2334 valid, as a quick test to invalidate them.
2335
2336 reg_set_in_block records whether the register is set in the block
2337 and is used to compute "transparency". */
2338
2339 static void
2340 record_last_reg_set_info (insn, regno)
2341 rtx insn;
2342 int regno;
2343 {
2344 struct reg_avail_info *info = &reg_avail_info[regno];
2345 int cuid = INSN_CUID (insn);
2346
2347 info->last_set = cuid;
2348 if (info->last_bb != current_bb)
2349 {
2350 info->last_bb = current_bb;
2351 info->first_set = cuid;
2352 SET_BIT (reg_set_in_block[current_bb], regno);
2353 }
2354 }
2355
2356
2357 /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
2358 Note we store a pair of elements in the list, so they have to be
2359 taken off pairwise. */
2360
2361 static void
2362 canon_list_insert (dest, unused1, v_insn)
2363 rtx dest ATTRIBUTE_UNUSED;
2364 rtx unused1 ATTRIBUTE_UNUSED;
2365 void * v_insn;
2366 {
2367 rtx dest_addr, insn;
2368
2369 while (GET_CODE (dest) == SUBREG
2370 || GET_CODE (dest) == ZERO_EXTRACT
2371 || GET_CODE (dest) == SIGN_EXTRACT
2372 || GET_CODE (dest) == STRICT_LOW_PART)
2373 dest = XEXP (dest, 0);
2374
2375 /* If DEST is not a MEM, then it will not conflict with a load. Note
2376 that function calls are assumed to clobber memory, but are handled
2377 elsewhere. */
2378
2379 if (GET_CODE (dest) != MEM)
2380 return;
2381
2382 dest_addr = get_addr (XEXP (dest, 0));
2383 dest_addr = canon_rtx (dest_addr);
2384 insn = (rtx) v_insn;
2385
2386 canon_modify_mem_list[BLOCK_NUM (insn)] =
2387 alloc_INSN_LIST (dest_addr, canon_modify_mem_list[BLOCK_NUM (insn)]);
2388 canon_modify_mem_list[BLOCK_NUM (insn)] =
2389 alloc_INSN_LIST (dest, canon_modify_mem_list[BLOCK_NUM (insn)]);
2390 bitmap_set_bit (canon_modify_mem_list_set, BLOCK_NUM (insn));
2391 }
2392
2393 /* Record memory modification information for INSN. We do not actually care
2394 about the memory location(s) that are set, or even how they are set (consider
2395 a CALL_INSN). We merely need to record which insns modify memory. */
2396
2397 static void
2398 record_last_mem_set_info (insn)
2399 rtx insn;
2400 {
2401 /* load_killed_in_block_p will handle the case of calls clobbering
2402 everything. */
2403 modify_mem_list[BLOCK_NUM (insn)] =
2404 alloc_INSN_LIST (insn, modify_mem_list[BLOCK_NUM (insn)]);
2405 bitmap_set_bit (modify_mem_list_set, BLOCK_NUM (insn));
2406
2407 if (GET_CODE (insn) == CALL_INSN)
2408 {
2409 /* Note that traversals of this loop (other than for free-ing)
2410 will break after encountering a CALL_INSN. So, there's no
2411 need to insert a pair of items, as canon_list_insert does. */
2412 canon_modify_mem_list[BLOCK_NUM (insn)] =
2413 alloc_INSN_LIST (insn, canon_modify_mem_list[BLOCK_NUM (insn)]);
2414 bitmap_set_bit (canon_modify_mem_list_set, BLOCK_NUM (insn));
2415 }
2416 else
2417 note_stores (PATTERN (insn), canon_list_insert, (void*)insn );
2418 }
2419
2420 /* Called from compute_hash_table via note_stores to handle one
2421 SET or CLOBBER in an insn. DATA is really the instruction in which
2422 the SET is taking place. */
2423
2424 static void
2425 record_last_set_info (dest, setter, data)
2426 rtx dest, setter ATTRIBUTE_UNUSED;
2427 void *data;
2428 {
2429 rtx last_set_insn = (rtx) data;
2430
2431 if (GET_CODE (dest) == SUBREG)
2432 dest = SUBREG_REG (dest);
2433
2434 if (GET_CODE (dest) == REG)
2435 record_last_reg_set_info (last_set_insn, REGNO (dest));
2436 else if (GET_CODE (dest) == MEM
2437 /* Ignore pushes, they clobber nothing. */
2438 && ! push_operand (dest, GET_MODE (dest)))
2439 record_last_mem_set_info (last_set_insn);
2440 }
2441
2442 /* Top level function to create an expression or assignment hash table.
2443
2444 Expression entries are placed in the hash table if
2445 - they are of the form (set (pseudo-reg) src),
2446 - src is something we want to perform GCSE on,
2447 - none of the operands are subsequently modified in the block
2448
2449 Assignment entries are placed in the hash table if
2450 - they are of the form (set (pseudo-reg) src),
2451 - src is something we want to perform const/copy propagation on,
2452 - none of the operands or target are subsequently modified in the block
2453
2454 Currently src must be a pseudo-reg or a const_int.
2455
2456 F is the first insn.
2457 SET_P is non-zero for computing the assignment hash table. */
2458
2459 static void
2460 compute_hash_table (set_p)
2461 int set_p;
2462 {
2463 unsigned int i;
2464
2465 /* While we compute the hash table we also compute a bit array of which
2466 registers are set in which blocks.
2467 ??? This isn't needed during const/copy propagation, but it's cheap to
2468 compute. Later. */
2469 sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
2470
2471 /* re-Cache any INSN_LIST nodes we have allocated. */
2472 clear_modify_mem_tables ();
2473 /* Some working arrays used to track first and last set in each block. */
2474 reg_avail_info = (struct reg_avail_info*)
2475 gmalloc (max_gcse_regno * sizeof (struct reg_avail_info));
2476
2477 for (i = 0; i < max_gcse_regno; ++i)
2478 reg_avail_info[i].last_bb = NEVER_SET;
2479
2480 for (current_bb = 0; current_bb < n_basic_blocks; current_bb++)
2481 {
2482 rtx insn;
2483 unsigned int regno;
2484 int in_libcall_block;
2485
2486 /* First pass over the instructions records information used to
2487 determine when registers and memory are first and last set.
2488 ??? hard-reg reg_set_in_block computation
2489 could be moved to compute_sets since they currently don't change. */
2490
2491 for (insn = BLOCK_HEAD (current_bb);
2492 insn && insn != NEXT_INSN (BLOCK_END (current_bb));
2493 insn = NEXT_INSN (insn))
2494 {
2495 if (! INSN_P (insn))
2496 continue;
2497
2498 if (GET_CODE (insn) == CALL_INSN)
2499 {
2500 bool clobbers_all = false;
2501 #ifdef NON_SAVING_SETJMP
2502 if (NON_SAVING_SETJMP
2503 && find_reg_note (insn, REG_SETJMP, NULL_RTX))
2504 clobbers_all = true;
2505 #endif
2506
2507 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2508 if (clobbers_all
2509 || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2510 record_last_reg_set_info (insn, regno);
2511
2512 mark_call (insn);
2513 }
2514
2515 note_stores (PATTERN (insn), record_last_set_info, insn);
2516 }
2517
2518 /* The next pass builds the hash table. */
2519
2520 for (insn = BLOCK_HEAD (current_bb), in_libcall_block = 0;
2521 insn && insn != NEXT_INSN (BLOCK_END (current_bb));
2522 insn = NEXT_INSN (insn))
2523 if (INSN_P (insn))
2524 {
2525 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
2526 in_libcall_block = 1;
2527 else if (set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2528 in_libcall_block = 0;
2529 hash_scan_insn (insn, set_p, in_libcall_block);
2530 if (!set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2531 in_libcall_block = 0;
2532 }
2533 }
2534
2535 free (reg_avail_info);
2536 reg_avail_info = NULL;
2537 }
2538
2539 /* Allocate space for the set hash table.
2540 N_INSNS is the number of instructions in the function.
2541 It is used to determine the number of buckets to use. */
2542
2543 static void
2544 alloc_set_hash_table (n_insns)
2545 int n_insns;
2546 {
2547 int n;
2548
2549 set_hash_table_size = n_insns / 4;
2550 if (set_hash_table_size < 11)
2551 set_hash_table_size = 11;
2552
2553 /* Attempt to maintain efficient use of hash table.
2554 Making it an odd number is simplest for now.
2555 ??? Later take some measurements. */
2556 set_hash_table_size |= 1;
2557 n = set_hash_table_size * sizeof (struct expr *);
2558 set_hash_table = (struct expr **) gmalloc (n);
2559 }
2560
2561 /* Free things allocated by alloc_set_hash_table. */
2562
2563 static void
2564 free_set_hash_table ()
2565 {
2566 free (set_hash_table);
2567 }
2568
2569 /* Compute the hash table for doing copy/const propagation. */
2570
2571 static void
2572 compute_set_hash_table ()
2573 {
2574 /* Initialize count of number of entries in hash table. */
2575 n_sets = 0;
2576 memset ((char *) set_hash_table, 0,
2577 set_hash_table_size * sizeof (struct expr *));
2578
2579 compute_hash_table (1);
2580 }
2581
2582 /* Allocate space for the expression hash table.
2583 N_INSNS is the number of instructions in the function.
2584 It is used to determine the number of buckets to use. */
2585
2586 static void
2587 alloc_expr_hash_table (n_insns)
2588 unsigned int n_insns;
2589 {
2590 int n;
2591
2592 expr_hash_table_size = n_insns / 2;
2593 /* Make sure the amount is usable. */
2594 if (expr_hash_table_size < 11)
2595 expr_hash_table_size = 11;
2596
2597 /* Attempt to maintain efficient use of hash table.
2598 Making it an odd number is simplest for now.
2599 ??? Later take some measurements. */
2600 expr_hash_table_size |= 1;
2601 n = expr_hash_table_size * sizeof (struct expr *);
2602 expr_hash_table = (struct expr **) gmalloc (n);
2603 }
2604
2605 /* Free things allocated by alloc_expr_hash_table. */
2606
2607 static void
2608 free_expr_hash_table ()
2609 {
2610 free (expr_hash_table);
2611 }
2612
2613 /* Compute the hash table for doing GCSE. */
2614
2615 static void
2616 compute_expr_hash_table ()
2617 {
2618 /* Initialize count of number of entries in hash table. */
2619 n_exprs = 0;
2620 memset ((char *) expr_hash_table, 0,
2621 expr_hash_table_size * sizeof (struct expr *));
2622
2623 compute_hash_table (0);
2624 }
2625 \f
2626 /* Expression tracking support. */
2627
2628 /* Lookup pattern PAT in the expression table.
2629 The result is a pointer to the table entry, or NULL if not found. */
2630
2631 static struct expr *
2632 lookup_expr (pat)
2633 rtx pat;
2634 {
2635 int do_not_record_p;
2636 unsigned int hash = hash_expr (pat, GET_MODE (pat), &do_not_record_p,
2637 expr_hash_table_size);
2638 struct expr *expr;
2639
2640 if (do_not_record_p)
2641 return NULL;
2642
2643 expr = expr_hash_table[hash];
2644
2645 while (expr && ! expr_equiv_p (expr->expr, pat))
2646 expr = expr->next_same_hash;
2647
2648 return expr;
2649 }
2650
2651 /* Lookup REGNO in the set table. If PAT is non-NULL look for the entry that
2652 matches it, otherwise return the first entry for REGNO. The result is a
2653 pointer to the table entry, or NULL if not found. */
2654
2655 static struct expr *
2656 lookup_set (regno, pat)
2657 unsigned int regno;
2658 rtx pat;
2659 {
2660 unsigned int hash = hash_set (regno, set_hash_table_size);
2661 struct expr *expr;
2662
2663 expr = set_hash_table[hash];
2664
2665 if (pat)
2666 {
2667 while (expr && ! expr_equiv_p (expr->expr, pat))
2668 expr = expr->next_same_hash;
2669 }
2670 else
2671 {
2672 while (expr && REGNO (SET_DEST (expr->expr)) != regno)
2673 expr = expr->next_same_hash;
2674 }
2675
2676 return expr;
2677 }
2678
2679 /* Return the next entry for REGNO in list EXPR. */
2680
2681 static struct expr *
2682 next_set (regno, expr)
2683 unsigned int regno;
2684 struct expr *expr;
2685 {
2686 do
2687 expr = expr->next_same_hash;
2688 while (expr && REGNO (SET_DEST (expr->expr)) != regno);
2689
2690 return expr;
2691 }
2692
2693 /* Clear canon_modify_mem_list and modify_mem_list tables. */
2694 static void
2695 clear_modify_mem_tables ()
2696 {
2697 int i;
2698
2699 EXECUTE_IF_SET_IN_BITMAP
2700 (canon_modify_mem_list_set, 0, i,
2701 free_INSN_LIST_list (modify_mem_list + i));
2702 bitmap_clear (canon_modify_mem_list_set);
2703
2704 EXECUTE_IF_SET_IN_BITMAP
2705 (canon_modify_mem_list_set, 0, i,
2706 free_INSN_LIST_list (canon_modify_mem_list + i));
2707 bitmap_clear (modify_mem_list_set);
2708 }
2709
2710 /* Release memory used by modify_mem_list_set and canon_modify_mem_list_set. */
2711
2712 static void
2713 free_modify_mem_tables ()
2714 {
2715 clear_modify_mem_tables ();
2716 free (modify_mem_list);
2717 free (canon_modify_mem_list);
2718 modify_mem_list = 0;
2719 canon_modify_mem_list = 0;
2720 }
2721
2722 /* Reset tables used to keep track of what's still available [since the
2723 start of the block]. */
2724
2725 static void
2726 reset_opr_set_tables ()
2727 {
2728 /* Maintain a bitmap of which regs have been set since beginning of
2729 the block. */
2730 CLEAR_REG_SET (reg_set_bitmap);
2731
2732 /* Also keep a record of the last instruction to modify memory.
2733 For now this is very trivial, we only record whether any memory
2734 location has been modified. */
2735 clear_modify_mem_tables ();
2736 }
2737
2738 /* Return non-zero if the operands of X are not set before INSN in
2739 INSN's basic block. */
2740
2741 static int
2742 oprs_not_set_p (x, insn)
2743 rtx x, insn;
2744 {
2745 int i, j;
2746 enum rtx_code code;
2747 const char *fmt;
2748
2749 if (x == 0)
2750 return 1;
2751
2752 code = GET_CODE (x);
2753 switch (code)
2754 {
2755 case PC:
2756 case CC0:
2757 case CONST:
2758 case CONST_INT:
2759 case CONST_DOUBLE:
2760 case SYMBOL_REF:
2761 case LABEL_REF:
2762 case ADDR_VEC:
2763 case ADDR_DIFF_VEC:
2764 return 1;
2765
2766 case MEM:
2767 if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
2768 INSN_CUID (insn), x, 0))
2769 return 0;
2770 else
2771 return oprs_not_set_p (XEXP (x, 0), insn);
2772
2773 case REG:
2774 return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));
2775
2776 default:
2777 break;
2778 }
2779
2780 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2781 {
2782 if (fmt[i] == 'e')
2783 {
2784 /* If we are about to do the last recursive call
2785 needed at this level, change it into iteration.
2786 This function is called enough to be worth it. */
2787 if (i == 0)
2788 return oprs_not_set_p (XEXP (x, i), insn);
2789
2790 if (! oprs_not_set_p (XEXP (x, i), insn))
2791 return 0;
2792 }
2793 else if (fmt[i] == 'E')
2794 for (j = 0; j < XVECLEN (x, i); j++)
2795 if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
2796 return 0;
2797 }
2798
2799 return 1;
2800 }
2801
2802 /* Mark things set by a CALL. */
2803
2804 static void
2805 mark_call (insn)
2806 rtx insn;
2807 {
2808 if (! CONST_OR_PURE_CALL_P (insn))
2809 record_last_mem_set_info (insn);
2810 }
2811
2812 /* Mark things set by a SET. */
2813
2814 static void
2815 mark_set (pat, insn)
2816 rtx pat, insn;
2817 {
2818 rtx dest = SET_DEST (pat);
2819
2820 while (GET_CODE (dest) == SUBREG
2821 || GET_CODE (dest) == ZERO_EXTRACT
2822 || GET_CODE (dest) == SIGN_EXTRACT
2823 || GET_CODE (dest) == STRICT_LOW_PART)
2824 dest = XEXP (dest, 0);
2825
2826 if (GET_CODE (dest) == REG)
2827 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
2828 else if (GET_CODE (dest) == MEM)
2829 record_last_mem_set_info (insn);
2830
2831 if (GET_CODE (SET_SRC (pat)) == CALL)
2832 mark_call (insn);
2833 }
2834
2835 /* Record things set by a CLOBBER. */
2836
2837 static void
2838 mark_clobber (pat, insn)
2839 rtx pat, insn;
2840 {
2841 rtx clob = XEXP (pat, 0);
2842
2843 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
2844 clob = XEXP (clob, 0);
2845
2846 if (GET_CODE (clob) == REG)
2847 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
2848 else
2849 record_last_mem_set_info (insn);
2850 }
2851
2852 /* Record things set by INSN.
2853 This data is used by oprs_not_set_p. */
2854
2855 static void
2856 mark_oprs_set (insn)
2857 rtx insn;
2858 {
2859 rtx pat = PATTERN (insn);
2860 int i;
2861
2862 if (GET_CODE (pat) == SET)
2863 mark_set (pat, insn);
2864 else if (GET_CODE (pat) == PARALLEL)
2865 for (i = 0; i < XVECLEN (pat, 0); i++)
2866 {
2867 rtx x = XVECEXP (pat, 0, i);
2868
2869 if (GET_CODE (x) == SET)
2870 mark_set (x, insn);
2871 else if (GET_CODE (x) == CLOBBER)
2872 mark_clobber (x, insn);
2873 else if (GET_CODE (x) == CALL)
2874 mark_call (insn);
2875 }
2876
2877 else if (GET_CODE (pat) == CLOBBER)
2878 mark_clobber (pat, insn);
2879 else if (GET_CODE (pat) == CALL)
2880 mark_call (insn);
2881 }
2882
2883 \f
2884 /* Classic GCSE reaching definition support. */
2885
2886 /* Allocate reaching def variables. */
2887
2888 static void
2889 alloc_rd_mem (n_blocks, n_insns)
2890 int n_blocks, n_insns;
2891 {
2892 rd_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2893 sbitmap_vector_zero (rd_kill, n_basic_blocks);
2894
2895 rd_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2896 sbitmap_vector_zero (rd_gen, n_basic_blocks);
2897
2898 reaching_defs = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2899 sbitmap_vector_zero (reaching_defs, n_basic_blocks);
2900
2901 rd_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2902 sbitmap_vector_zero (rd_out, n_basic_blocks);
2903 }
2904
2905 /* Free reaching def variables. */
2906
2907 static void
2908 free_rd_mem ()
2909 {
2910 sbitmap_vector_free (rd_kill);
2911 sbitmap_vector_free (rd_gen);
2912 sbitmap_vector_free (reaching_defs);
2913 sbitmap_vector_free (rd_out);
2914 }
2915
2916 /* Add INSN to the kills of BB. REGNO, set in BB, is killed by INSN. */
2917
2918 static void
2919 handle_rd_kill_set (insn, regno, bb)
2920 rtx insn;
2921 int regno;
2922 basic_block bb;
2923 {
2924 struct reg_set *this_reg;
2925
2926 for (this_reg = reg_set_table[regno]; this_reg; this_reg = this_reg ->next)
2927 if (BLOCK_NUM (this_reg->insn) != BLOCK_NUM (insn))
2928 SET_BIT (rd_kill[bb->index], INSN_CUID (this_reg->insn));
2929 }
2930
2931 /* Compute the set of kill's for reaching definitions. */
2932
2933 static void
2934 compute_kill_rd ()
2935 {
2936 int bb, cuid;
2937 unsigned int regno;
2938 int i;
2939
2940 /* For each block
2941 For each set bit in `gen' of the block (i.e each insn which
2942 generates a definition in the block)
2943 Call the reg set by the insn corresponding to that bit regx
2944 Look at the linked list starting at reg_set_table[regx]
2945 For each setting of regx in the linked list, which is not in
2946 this block
2947 Set the bit in `kill' corresponding to that insn. */
2948 for (bb = 0; bb < n_basic_blocks; bb++)
2949 for (cuid = 0; cuid < max_cuid; cuid++)
2950 if (TEST_BIT (rd_gen[bb], cuid))
2951 {
2952 rtx insn = CUID_INSN (cuid);
2953 rtx pat = PATTERN (insn);
2954
2955 if (GET_CODE (insn) == CALL_INSN)
2956 {
2957 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2958 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2959 handle_rd_kill_set (insn, regno, BASIC_BLOCK (bb));
2960 }
2961
2962 if (GET_CODE (pat) == PARALLEL)
2963 {
2964 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
2965 {
2966 enum rtx_code code = GET_CODE (XVECEXP (pat, 0, i));
2967
2968 if ((code == SET || code == CLOBBER)
2969 && GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG)
2970 handle_rd_kill_set (insn,
2971 REGNO (XEXP (XVECEXP (pat, 0, i), 0)),
2972 BASIC_BLOCK (bb));
2973 }
2974 }
2975 else if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == REG)
2976 /* Each setting of this register outside of this block
2977 must be marked in the set of kills in this block. */
2978 handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), BASIC_BLOCK (bb));
2979 }
2980 }
2981
2982 /* Compute the reaching definitions as in
2983 Compilers Principles, Techniques, and Tools. Aho, Sethi, Ullman,
2984 Chapter 10. It is the same algorithm as used for computing available
2985 expressions but applied to the gens and kills of reaching definitions. */
2986
2987 static void
2988 compute_rd ()
2989 {
2990 int bb, changed, passes;
2991
2992 for (bb = 0; bb < n_basic_blocks; bb++)
2993 sbitmap_copy (rd_out[bb] /*dst*/, rd_gen[bb] /*src*/);
2994
2995 passes = 0;
2996 changed = 1;
2997 while (changed)
2998 {
2999 changed = 0;
3000 for (bb = 0; bb < n_basic_blocks; bb++)
3001 {
3002 sbitmap_union_of_preds (reaching_defs[bb], rd_out, bb);
3003 changed |= sbitmap_union_of_diff (rd_out[bb], rd_gen[bb],
3004 reaching_defs[bb], rd_kill[bb]);
3005 }
3006 passes++;
3007 }
3008
3009 if (gcse_file)
3010 fprintf (gcse_file, "reaching def computation: %d passes\n", passes);
3011 }
3012 \f
3013 /* Classic GCSE available expression support. */
3014
3015 /* Allocate memory for available expression computation. */
3016
3017 static void
3018 alloc_avail_expr_mem (n_blocks, n_exprs)
3019 int n_blocks, n_exprs;
3020 {
3021 ae_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3022 sbitmap_vector_zero (ae_kill, n_basic_blocks);
3023
3024 ae_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3025 sbitmap_vector_zero (ae_gen, n_basic_blocks);
3026
3027 ae_in = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3028 sbitmap_vector_zero (ae_in, n_basic_blocks);
3029
3030 ae_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3031 sbitmap_vector_zero (ae_out, n_basic_blocks);
3032 }
3033
3034 static void
3035 free_avail_expr_mem ()
3036 {
3037 sbitmap_vector_free (ae_kill);
3038 sbitmap_vector_free (ae_gen);
3039 sbitmap_vector_free (ae_in);
3040 sbitmap_vector_free (ae_out);
3041 }
3042
3043 /* Compute the set of available expressions generated in each basic block. */
3044
3045 static void
3046 compute_ae_gen ()
3047 {
3048 unsigned int i;
3049 struct expr *expr;
3050 struct occr *occr;
3051
3052 /* For each recorded occurrence of each expression, set ae_gen[bb][expr].
3053 This is all we have to do because an expression is not recorded if it
3054 is not available, and the only expressions we want to work with are the
3055 ones that are recorded. */
3056 for (i = 0; i < expr_hash_table_size; i++)
3057 for (expr = expr_hash_table[i]; expr != 0; expr = expr->next_same_hash)
3058 for (occr = expr->avail_occr; occr != 0; occr = occr->next)
3059 SET_BIT (ae_gen[BLOCK_NUM (occr->insn)], expr->bitmap_index);
3060 }
3061
3062 /* Return non-zero if expression X is killed in BB. */
3063
3064 static int
3065 expr_killed_p (x, bb)
3066 rtx x;
3067 basic_block bb;
3068 {
3069 int i, j;
3070 enum rtx_code code;
3071 const char *fmt;
3072
3073 if (x == 0)
3074 return 1;
3075
3076 code = GET_CODE (x);
3077 switch (code)
3078 {
3079 case REG:
3080 return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
3081
3082 case MEM:
3083 if (load_killed_in_block_p (bb, get_max_uid () + 1, x, 0))
3084 return 1;
3085 else
3086 return expr_killed_p (XEXP (x, 0), bb);
3087
3088 case PC:
3089 case CC0: /*FIXME*/
3090 case CONST:
3091 case CONST_INT:
3092 case CONST_DOUBLE:
3093 case SYMBOL_REF:
3094 case LABEL_REF:
3095 case ADDR_VEC:
3096 case ADDR_DIFF_VEC:
3097 return 0;
3098
3099 default:
3100 break;
3101 }
3102
3103 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3104 {
3105 if (fmt[i] == 'e')
3106 {
3107 /* If we are about to do the last recursive call
3108 needed at this level, change it into iteration.
3109 This function is called enough to be worth it. */
3110 if (i == 0)
3111 return expr_killed_p (XEXP (x, i), bb);
3112 else if (expr_killed_p (XEXP (x, i), bb))
3113 return 1;
3114 }
3115 else if (fmt[i] == 'E')
3116 for (j = 0; j < XVECLEN (x, i); j++)
3117 if (expr_killed_p (XVECEXP (x, i, j), bb))
3118 return 1;
3119 }
3120
3121 return 0;
3122 }
3123
3124 /* Compute the set of available expressions killed in each basic block. */
3125
3126 static void
3127 compute_ae_kill (ae_gen, ae_kill)
3128 sbitmap *ae_gen, *ae_kill;
3129 {
3130 int bb;
3131 unsigned int i;
3132 struct expr *expr;
3133
3134 for (bb = 0; bb < n_basic_blocks; bb++)
3135 for (i = 0; i < expr_hash_table_size; i++)
3136 for (expr = expr_hash_table[i]; expr; expr = expr->next_same_hash)
3137 {
3138 /* Skip EXPR if generated in this block. */
3139 if (TEST_BIT (ae_gen[bb], expr->bitmap_index))
3140 continue;
3141
3142 if (expr_killed_p (expr->expr, BASIC_BLOCK (bb)))
3143 SET_BIT (ae_kill[bb], expr->bitmap_index);
3144 }
3145 }
3146 \f
3147 /* Actually perform the Classic GCSE optimizations. */
3148
3149 /* Return non-zero if occurrence OCCR of expression EXPR reaches block BB.
3150
3151 CHECK_SELF_LOOP is non-zero if we should consider a block reaching itself
3152 as a positive reach. We want to do this when there are two computations
3153 of the expression in the block.
3154
3155 VISITED is a pointer to a working buffer for tracking which BB's have
3156 been visited. It is NULL for the top-level call.
3157
3158 We treat reaching expressions that go through blocks containing the same
3159 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3160 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3161 2 as not reaching. The intent is to improve the probability of finding
3162 only one reaching expression and to reduce register lifetimes by picking
3163 the closest such expression. */
3164
3165 static int
3166 expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited)
3167 struct occr *occr;
3168 struct expr *expr;
3169 basic_block bb;
3170 int check_self_loop;
3171 char *visited;
3172 {
3173 edge pred;
3174
3175 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
3176 {
3177 basic_block pred_bb = pred->src;
3178
3179 if (visited[pred_bb->index])
3180 /* This predecessor has already been visited. Nothing to do. */
3181 ;
3182 else if (pred_bb == bb)
3183 {
3184 /* BB loops on itself. */
3185 if (check_self_loop
3186 && TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index)
3187 && BLOCK_NUM (occr->insn) == pred_bb->index)
3188 return 1;
3189
3190 visited[pred_bb->index] = 1;
3191 }
3192
3193 /* Ignore this predecessor if it kills the expression. */
3194 else if (TEST_BIT (ae_kill[pred_bb->index], expr->bitmap_index))
3195 visited[pred_bb->index] = 1;
3196
3197 /* Does this predecessor generate this expression? */
3198 else if (TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index))
3199 {
3200 /* Is this the occurrence we're looking for?
3201 Note that there's only one generating occurrence per block
3202 so we just need to check the block number. */
3203 if (BLOCK_NUM (occr->insn) == pred_bb->index)
3204 return 1;
3205
3206 visited[pred_bb->index] = 1;
3207 }
3208
3209 /* Neither gen nor kill. */
3210 else
3211 {
3212 visited[pred_bb->index] = 1;
3213 if (expr_reaches_here_p_work (occr, expr, pred_bb, check_self_loop,
3214 visited))
3215
3216 return 1;
3217 }
3218 }
3219
3220 /* All paths have been checked. */
3221 return 0;
3222 }
3223
3224 /* This wrapper for expr_reaches_here_p_work() is to ensure that any
3225 memory allocated for that function is returned. */
3226
3227 static int
3228 expr_reaches_here_p (occr, expr, bb, check_self_loop)
3229 struct occr *occr;
3230 struct expr *expr;
3231 basic_block bb;
3232 int check_self_loop;
3233 {
3234 int rval;
3235 char *visited = (char *) xcalloc (n_basic_blocks, 1);
3236
3237 rval = expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited);
3238
3239 free (visited);
3240 return rval;
3241 }
3242
3243 /* Return the instruction that computes EXPR that reaches INSN's basic block.
3244 If there is more than one such instruction, return NULL.
3245
3246 Called only by handle_avail_expr. */
3247
3248 static rtx
3249 computing_insn (expr, insn)
3250 struct expr *expr;
3251 rtx insn;
3252 {
3253 basic_block bb = BLOCK_FOR_INSN (insn);
3254
3255 if (expr->avail_occr->next == NULL)
3256 {
3257 if (BLOCK_FOR_INSN (expr->avail_occr->insn) == bb)
3258 /* The available expression is actually itself
3259 (i.e. a loop in the flow graph) so do nothing. */
3260 return NULL;
3261
3262 /* (FIXME) Case that we found a pattern that was created by
3263 a substitution that took place. */
3264 return expr->avail_occr->insn;
3265 }
3266 else
3267 {
3268 /* Pattern is computed more than once.
3269 Search backwards from this insn to see how many of these
3270 computations actually reach this insn. */
3271 struct occr *occr;
3272 rtx insn_computes_expr = NULL;
3273 int can_reach = 0;
3274
3275 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
3276 {
3277 if (BLOCK_FOR_INSN (occr->insn) == bb)
3278 {
3279 /* The expression is generated in this block.
3280 The only time we care about this is when the expression
3281 is generated later in the block [and thus there's a loop].
3282 We let the normal cse pass handle the other cases. */
3283 if (INSN_CUID (insn) < INSN_CUID (occr->insn)
3284 && expr_reaches_here_p (occr, expr, bb, 1))
3285 {
3286 can_reach++;
3287 if (can_reach > 1)
3288 return NULL;
3289
3290 insn_computes_expr = occr->insn;
3291 }
3292 }
3293 else if (expr_reaches_here_p (occr, expr, bb, 0))
3294 {
3295 can_reach++;
3296 if (can_reach > 1)
3297 return NULL;
3298
3299 insn_computes_expr = occr->insn;
3300 }
3301 }
3302
3303 if (insn_computes_expr == NULL)
3304 abort ();
3305
3306 return insn_computes_expr;
3307 }
3308 }
3309
3310 /* Return non-zero if the definition in DEF_INSN can reach INSN.
3311 Only called by can_disregard_other_sets. */
3312
3313 static int
3314 def_reaches_here_p (insn, def_insn)
3315 rtx insn, def_insn;
3316 {
3317 rtx reg;
3318
3319 if (TEST_BIT (reaching_defs[BLOCK_NUM (insn)], INSN_CUID (def_insn)))
3320 return 1;
3321
3322 if (BLOCK_NUM (insn) == BLOCK_NUM (def_insn))
3323 {
3324 if (INSN_CUID (def_insn) < INSN_CUID (insn))
3325 {
3326 if (GET_CODE (PATTERN (def_insn)) == PARALLEL)
3327 return 1;
3328 else if (GET_CODE (PATTERN (def_insn)) == CLOBBER)
3329 reg = XEXP (PATTERN (def_insn), 0);
3330 else if (GET_CODE (PATTERN (def_insn)) == SET)
3331 reg = SET_DEST (PATTERN (def_insn));
3332 else
3333 abort ();
3334
3335 return ! reg_set_between_p (reg, NEXT_INSN (def_insn), insn);
3336 }
3337 else
3338 return 0;
3339 }
3340
3341 return 0;
3342 }
3343
3344 /* Return non-zero if *ADDR_THIS_REG can only have one value at INSN. The
3345 value returned is the number of definitions that reach INSN. Returning a
3346 value of zero means that [maybe] more than one definition reaches INSN and
3347 the caller can't perform whatever optimization it is trying. i.e. it is
3348 always safe to return zero. */
3349
3350 static int
3351 can_disregard_other_sets (addr_this_reg, insn, for_combine)
3352 struct reg_set **addr_this_reg;
3353 rtx insn;
3354 int for_combine;
3355 {
3356 int number_of_reaching_defs = 0;
3357 struct reg_set *this_reg;
3358
3359 for (this_reg = *addr_this_reg; this_reg != 0; this_reg = this_reg->next)
3360 if (def_reaches_here_p (insn, this_reg->insn))
3361 {
3362 number_of_reaching_defs++;
3363 /* Ignore parallels for now. */
3364 if (GET_CODE (PATTERN (this_reg->insn)) == PARALLEL)
3365 return 0;
3366
3367 if (!for_combine
3368 && (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER
3369 || ! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3370 SET_SRC (PATTERN (insn)))))
3371 /* A setting of the reg to a different value reaches INSN. */
3372 return 0;
3373
3374 if (number_of_reaching_defs > 1)
3375 {
3376 /* If in this setting the value the register is being set to is
3377 equal to the previous value the register was set to and this
3378 setting reaches the insn we are trying to do the substitution
3379 on then we are ok. */
3380 if (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER)
3381 return 0;
3382 else if (! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3383 SET_SRC (PATTERN (insn))))
3384 return 0;
3385 }
3386
3387 *addr_this_reg = this_reg;
3388 }
3389
3390 return number_of_reaching_defs;
3391 }
3392
3393 /* Expression computed by insn is available and the substitution is legal,
3394 so try to perform the substitution.
3395
3396 The result is non-zero if any changes were made. */
3397
3398 static int
3399 handle_avail_expr (insn, expr)
3400 rtx insn;
3401 struct expr *expr;
3402 {
3403 rtx pat, insn_computes_expr, expr_set;
3404 rtx to;
3405 struct reg_set *this_reg;
3406 int found_setting, use_src;
3407 int changed = 0;
3408
3409 /* We only handle the case where one computation of the expression
3410 reaches this instruction. */
3411 insn_computes_expr = computing_insn (expr, insn);
3412 if (insn_computes_expr == NULL)
3413 return 0;
3414 expr_set = single_set (insn_computes_expr);
3415 if (!expr_set)
3416 abort ();
3417
3418 found_setting = 0;
3419 use_src = 0;
3420
3421 /* At this point we know only one computation of EXPR outside of this
3422 block reaches this insn. Now try to find a register that the
3423 expression is computed into. */
3424 if (GET_CODE (SET_SRC (expr_set)) == REG)
3425 {
3426 /* This is the case when the available expression that reaches
3427 here has already been handled as an available expression. */
3428 unsigned int regnum_for_replacing
3429 = REGNO (SET_SRC (expr_set));
3430
3431 /* If the register was created by GCSE we can't use `reg_set_table',
3432 however we know it's set only once. */
3433 if (regnum_for_replacing >= max_gcse_regno
3434 /* If the register the expression is computed into is set only once,
3435 or only one set reaches this insn, we can use it. */
3436 || (((this_reg = reg_set_table[regnum_for_replacing]),
3437 this_reg->next == NULL)
3438 || can_disregard_other_sets (&this_reg, insn, 0)))
3439 {
3440 use_src = 1;
3441 found_setting = 1;
3442 }
3443 }
3444
3445 if (!found_setting)
3446 {
3447 unsigned int regnum_for_replacing
3448 = REGNO (SET_DEST (expr_set));
3449
3450 /* This shouldn't happen. */
3451 if (regnum_for_replacing >= max_gcse_regno)
3452 abort ();
3453
3454 this_reg = reg_set_table[regnum_for_replacing];
3455
3456 /* If the register the expression is computed into is set only once,
3457 or only one set reaches this insn, use it. */
3458 if (this_reg->next == NULL
3459 || can_disregard_other_sets (&this_reg, insn, 0))
3460 found_setting = 1;
3461 }
3462
3463 if (found_setting)
3464 {
3465 pat = PATTERN (insn);
3466 if (use_src)
3467 to = SET_SRC (expr_set);
3468 else
3469 to = SET_DEST (expr_set);
3470 changed = validate_change (insn, &SET_SRC (pat), to, 0);
3471
3472 /* We should be able to ignore the return code from validate_change but
3473 to play it safe we check. */
3474 if (changed)
3475 {
3476 gcse_subst_count++;
3477 if (gcse_file != NULL)
3478 {
3479 fprintf (gcse_file, "GCSE: Replacing the source in insn %d with",
3480 INSN_UID (insn));
3481 fprintf (gcse_file, " reg %d %s insn %d\n",
3482 REGNO (to), use_src ? "from" : "set in",
3483 INSN_UID (insn_computes_expr));
3484 }
3485 }
3486 }
3487
3488 /* The register that the expr is computed into is set more than once. */
3489 else if (1 /*expensive_op(this_pattrn->op) && do_expensive_gcse)*/)
3490 {
3491 /* Insert an insn after insnx that copies the reg set in insnx
3492 into a new pseudo register call this new register REGN.
3493 From insnb until end of basic block or until REGB is set
3494 replace all uses of REGB with REGN. */
3495 rtx new_insn;
3496
3497 to = gen_reg_rtx (GET_MODE (SET_DEST (expr_set)));
3498
3499 /* Generate the new insn. */
3500 /* ??? If the change fails, we return 0, even though we created
3501 an insn. I think this is ok. */
3502 new_insn
3503 = emit_insn_after (gen_rtx_SET (VOIDmode, to,
3504 SET_DEST (expr_set)),
3505 insn_computes_expr);
3506
3507 /* Keep register set table up to date. */
3508 record_one_set (REGNO (to), new_insn);
3509
3510 gcse_create_count++;
3511 if (gcse_file != NULL)
3512 {
3513 fprintf (gcse_file, "GCSE: Creating insn %d to copy value of reg %d",
3514 INSN_UID (NEXT_INSN (insn_computes_expr)),
3515 REGNO (SET_SRC (PATTERN (NEXT_INSN (insn_computes_expr)))));
3516 fprintf (gcse_file, ", computed in insn %d,\n",
3517 INSN_UID (insn_computes_expr));
3518 fprintf (gcse_file, " into newly allocated reg %d\n",
3519 REGNO (to));
3520 }
3521
3522 pat = PATTERN (insn);
3523
3524 /* Do register replacement for INSN. */
3525 changed = validate_change (insn, &SET_SRC (pat),
3526 SET_DEST (PATTERN
3527 (NEXT_INSN (insn_computes_expr))),
3528 0);
3529
3530 /* We should be able to ignore the return code from validate_change but
3531 to play it safe we check. */
3532 if (changed)
3533 {
3534 gcse_subst_count++;
3535 if (gcse_file != NULL)
3536 {
3537 fprintf (gcse_file,
3538 "GCSE: Replacing the source in insn %d with reg %d ",
3539 INSN_UID (insn),
3540 REGNO (SET_DEST (PATTERN (NEXT_INSN
3541 (insn_computes_expr)))));
3542 fprintf (gcse_file, "set in insn %d\n",
3543 INSN_UID (insn_computes_expr));
3544 }
3545 }
3546 }
3547
3548 return changed;
3549 }
3550
3551 /* Perform classic GCSE. This is called by one_classic_gcse_pass after all
3552 the dataflow analysis has been done.
3553
3554 The result is non-zero if a change was made. */
3555
3556 static int
3557 classic_gcse ()
3558 {
3559 int bb, changed;
3560 rtx insn;
3561
3562 /* Note we start at block 1. */
3563
3564 changed = 0;
3565 for (bb = 1; bb < n_basic_blocks; bb++)
3566 {
3567 /* Reset tables used to keep track of what's still valid [since the
3568 start of the block]. */
3569 reset_opr_set_tables ();
3570
3571 for (insn = BLOCK_HEAD (bb);
3572 insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
3573 insn = NEXT_INSN (insn))
3574 {
3575 /* Is insn of form (set (pseudo-reg) ...)? */
3576 if (GET_CODE (insn) == INSN
3577 && GET_CODE (PATTERN (insn)) == SET
3578 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
3579 && REGNO (SET_DEST (PATTERN (insn))) >= FIRST_PSEUDO_REGISTER)
3580 {
3581 rtx pat = PATTERN (insn);
3582 rtx src = SET_SRC (pat);
3583 struct expr *expr;
3584
3585 if (want_to_gcse_p (src)
3586 /* Is the expression recorded? */
3587 && ((expr = lookup_expr (src)) != NULL)
3588 /* Is the expression available [at the start of the
3589 block]? */
3590 && TEST_BIT (ae_in[bb], expr->bitmap_index)
3591 /* Are the operands unchanged since the start of the
3592 block? */
3593 && oprs_not_set_p (src, insn))
3594 changed |= handle_avail_expr (insn, expr);
3595 }
3596
3597 /* Keep track of everything modified by this insn. */
3598 /* ??? Need to be careful w.r.t. mods done to INSN. */
3599 if (INSN_P (insn))
3600 mark_oprs_set (insn);
3601 }
3602 }
3603
3604 return changed;
3605 }
3606
3607 /* Top level routine to perform one classic GCSE pass.
3608
3609 Return non-zero if a change was made. */
3610
3611 static int
3612 one_classic_gcse_pass (pass)
3613 int pass;
3614 {
3615 int changed = 0;
3616
3617 gcse_subst_count = 0;
3618 gcse_create_count = 0;
3619
3620 alloc_expr_hash_table (max_cuid);
3621 alloc_rd_mem (n_basic_blocks, max_cuid);
3622 compute_expr_hash_table ();
3623 if (gcse_file)
3624 dump_hash_table (gcse_file, "Expression", expr_hash_table,
3625 expr_hash_table_size, n_exprs);
3626
3627 if (n_exprs > 0)
3628 {
3629 compute_kill_rd ();
3630 compute_rd ();
3631 alloc_avail_expr_mem (n_basic_blocks, n_exprs);
3632 compute_ae_gen ();
3633 compute_ae_kill (ae_gen, ae_kill);
3634 compute_available (ae_gen, ae_kill, ae_out, ae_in);
3635 changed = classic_gcse ();
3636 free_avail_expr_mem ();
3637 }
3638
3639 free_rd_mem ();
3640 free_expr_hash_table ();
3641
3642 if (gcse_file)
3643 {
3644 fprintf (gcse_file, "\n");
3645 fprintf (gcse_file, "GCSE of %s, pass %d: %d bytes needed, %d substs,",
3646 current_function_name, pass, bytes_used, gcse_subst_count);
3647 fprintf (gcse_file, "%d insns created\n", gcse_create_count);
3648 }
3649
3650 return changed;
3651 }
3652 \f
3653 /* Compute copy/constant propagation working variables. */
3654
3655 /* Local properties of assignments. */
3656 static sbitmap *cprop_pavloc;
3657 static sbitmap *cprop_absaltered;
3658
3659 /* Global properties of assignments (computed from the local properties). */
3660 static sbitmap *cprop_avin;
3661 static sbitmap *cprop_avout;
3662
3663 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
3664 basic blocks. N_SETS is the number of sets. */
3665
3666 static void
3667 alloc_cprop_mem (n_blocks, n_sets)
3668 int n_blocks, n_sets;
3669 {
3670 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
3671 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
3672
3673 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
3674 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
3675 }
3676
3677 /* Free vars used by copy/const propagation. */
3678
3679 static void
3680 free_cprop_mem ()
3681 {
3682 sbitmap_vector_free (cprop_pavloc);
3683 sbitmap_vector_free (cprop_absaltered);
3684 sbitmap_vector_free (cprop_avin);
3685 sbitmap_vector_free (cprop_avout);
3686 }
3687
3688 /* For each block, compute whether X is transparent. X is either an
3689 expression or an assignment [though we don't care which, for this context
3690 an assignment is treated as an expression]. For each block where an
3691 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
3692 bit in BMAP. */
3693
3694 static void
3695 compute_transp (x, indx, bmap, set_p)
3696 rtx x;
3697 int indx;
3698 sbitmap *bmap;
3699 int set_p;
3700 {
3701 int bb, i, j;
3702 enum rtx_code code;
3703 reg_set *r;
3704 const char *fmt;
3705
3706 /* repeat is used to turn tail-recursion into iteration since GCC
3707 can't do it when there's no return value. */
3708 repeat:
3709
3710 if (x == 0)
3711 return;
3712
3713 code = GET_CODE (x);
3714 switch (code)
3715 {
3716 case REG:
3717 if (set_p)
3718 {
3719 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3720 {
3721 for (bb = 0; bb < n_basic_blocks; bb++)
3722 if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
3723 SET_BIT (bmap[bb], indx);
3724 }
3725 else
3726 {
3727 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3728 SET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3729 }
3730 }
3731 else
3732 {
3733 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3734 {
3735 for (bb = 0; bb < n_basic_blocks; bb++)
3736 if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
3737 RESET_BIT (bmap[bb], indx);
3738 }
3739 else
3740 {
3741 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3742 RESET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3743 }
3744 }
3745
3746 return;
3747
3748 case MEM:
3749 for (bb = 0; bb < n_basic_blocks; bb++)
3750 {
3751 rtx list_entry = canon_modify_mem_list[bb];
3752
3753 while (list_entry)
3754 {
3755 rtx dest, dest_addr;
3756
3757 if (GET_CODE (XEXP (list_entry, 0)) == CALL_INSN)
3758 {
3759 if (set_p)
3760 SET_BIT (bmap[bb], indx);
3761 else
3762 RESET_BIT (bmap[bb], indx);
3763 break;
3764 }
3765 /* LIST_ENTRY must be an INSN of some kind that sets memory.
3766 Examine each hunk of memory that is modified. */
3767
3768 dest = XEXP (list_entry, 0);
3769 list_entry = XEXP (list_entry, 1);
3770 dest_addr = XEXP (list_entry, 0);
3771
3772 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
3773 x, rtx_addr_varies_p))
3774 {
3775 if (set_p)
3776 SET_BIT (bmap[bb], indx);
3777 else
3778 RESET_BIT (bmap[bb], indx);
3779 break;
3780 }
3781 list_entry = XEXP (list_entry, 1);
3782 }
3783 }
3784
3785 x = XEXP (x, 0);
3786 goto repeat;
3787
3788 case PC:
3789 case CC0: /*FIXME*/
3790 case CONST:
3791 case CONST_INT:
3792 case CONST_DOUBLE:
3793 case SYMBOL_REF:
3794 case LABEL_REF:
3795 case ADDR_VEC:
3796 case ADDR_DIFF_VEC:
3797 return;
3798
3799 default:
3800 break;
3801 }
3802
3803 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3804 {
3805 if (fmt[i] == 'e')
3806 {
3807 /* If we are about to do the last recursive call
3808 needed at this level, change it into iteration.
3809 This function is called enough to be worth it. */
3810 if (i == 0)
3811 {
3812 x = XEXP (x, i);
3813 goto repeat;
3814 }
3815
3816 compute_transp (XEXP (x, i), indx, bmap, set_p);
3817 }
3818 else if (fmt[i] == 'E')
3819 for (j = 0; j < XVECLEN (x, i); j++)
3820 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
3821 }
3822 }
3823
3824 /* Top level routine to do the dataflow analysis needed by copy/const
3825 propagation. */
3826
3827 static void
3828 compute_cprop_data ()
3829 {
3830 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, 1);
3831 compute_available (cprop_pavloc, cprop_absaltered,
3832 cprop_avout, cprop_avin);
3833 }
3834 \f
3835 /* Copy/constant propagation. */
3836
3837 /* Maximum number of register uses in an insn that we handle. */
3838 #define MAX_USES 8
3839
3840 /* Table of uses found in an insn.
3841 Allocated statically to avoid alloc/free complexity and overhead. */
3842 static struct reg_use reg_use_table[MAX_USES];
3843
3844 /* Index into `reg_use_table' while building it. */
3845 static int reg_use_count;
3846
3847 /* Set up a list of register numbers used in INSN. The found uses are stored
3848 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
3849 and contains the number of uses in the table upon exit.
3850
3851 ??? If a register appears multiple times we will record it multiple times.
3852 This doesn't hurt anything but it will slow things down. */
3853
3854 static void
3855 find_used_regs (xptr, data)
3856 rtx *xptr;
3857 void *data ATTRIBUTE_UNUSED;
3858 {
3859 int i, j;
3860 enum rtx_code code;
3861 const char *fmt;
3862 rtx x = *xptr;
3863
3864 /* repeat is used to turn tail-recursion into iteration since GCC
3865 can't do it when there's no return value. */
3866 repeat:
3867 if (x == 0)
3868 return;
3869
3870 code = GET_CODE (x);
3871 if (REG_P (x))
3872 {
3873 if (reg_use_count == MAX_USES)
3874 return;
3875
3876 reg_use_table[reg_use_count].reg_rtx = x;
3877 reg_use_count++;
3878 }
3879
3880 /* Recursively scan the operands of this expression. */
3881
3882 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3883 {
3884 if (fmt[i] == 'e')
3885 {
3886 /* If we are about to do the last recursive call
3887 needed at this level, change it into iteration.
3888 This function is called enough to be worth it. */
3889 if (i == 0)
3890 {
3891 x = XEXP (x, 0);
3892 goto repeat;
3893 }
3894
3895 find_used_regs (&XEXP (x, i), data);
3896 }
3897 else if (fmt[i] == 'E')
3898 for (j = 0; j < XVECLEN (x, i); j++)
3899 find_used_regs (&XVECEXP (x, i, j), data);
3900 }
3901 }
3902
3903 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
3904 Returns non-zero is successful. */
3905
3906 static int
3907 try_replace_reg (from, to, insn)
3908 rtx from, to, insn;
3909 {
3910 rtx note = find_reg_equal_equiv_note (insn);
3911 rtx src = 0;
3912 int success = 0;
3913 rtx set = single_set (insn);
3914
3915 success = validate_replace_src (from, to, insn);
3916
3917 /* If above failed and this is a single set, try to simplify the source of
3918 the set given our substitution. We could perhaps try this for multiple
3919 SETs, but it probably won't buy us anything. */
3920 if (!success && set != 0)
3921 {
3922 src = simplify_replace_rtx (SET_SRC (set), from, to);
3923
3924 if (!rtx_equal_p (src, SET_SRC (set))
3925 && validate_change (insn, &SET_SRC (set), src, 0))
3926 success = 1;
3927 }
3928
3929 /* If we've failed to do replacement, have a single SET, and don't already
3930 have a note, add a REG_EQUAL note to not lose information. */
3931 if (!success && note == 0 && set != 0)
3932 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
3933
3934 /* If there is already a NOTE, update the expression in it with our
3935 replacement. */
3936 else if (note != 0)
3937 XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), from, to);
3938
3939 /* REG_EQUAL may get simplified into register.
3940 We don't allow that. Remove that note. This code ought
3941 not to hapen, because previous code ought to syntetize
3942 reg-reg move, but be on the safe side. */
3943 if (note && REG_P (XEXP (note, 0)))
3944 remove_note (insn, note);
3945
3946 return success;
3947 }
3948
3949 /* Find a set of REGNOs that are available on entry to INSN's block. Returns
3950 NULL no such set is found. */
3951
3952 static struct expr *
3953 find_avail_set (regno, insn)
3954 int regno;
3955 rtx insn;
3956 {
3957 /* SET1 contains the last set found that can be returned to the caller for
3958 use in a substitution. */
3959 struct expr *set1 = 0;
3960
3961 /* Loops are not possible here. To get a loop we would need two sets
3962 available at the start of the block containing INSN. ie we would
3963 need two sets like this available at the start of the block:
3964
3965 (set (reg X) (reg Y))
3966 (set (reg Y) (reg X))
3967
3968 This can not happen since the set of (reg Y) would have killed the
3969 set of (reg X) making it unavailable at the start of this block. */
3970 while (1)
3971 {
3972 rtx src;
3973 struct expr *set = lookup_set (regno, NULL_RTX);
3974
3975 /* Find a set that is available at the start of the block
3976 which contains INSN. */
3977 while (set)
3978 {
3979 if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
3980 break;
3981 set = next_set (regno, set);
3982 }
3983
3984 /* If no available set was found we've reached the end of the
3985 (possibly empty) copy chain. */
3986 if (set == 0)
3987 break;
3988
3989 if (GET_CODE (set->expr) != SET)
3990 abort ();
3991
3992 src = SET_SRC (set->expr);
3993
3994 /* We know the set is available.
3995 Now check that SRC is ANTLOC (i.e. none of the source operands
3996 have changed since the start of the block).
3997
3998 If the source operand changed, we may still use it for the next
3999 iteration of this loop, but we may not use it for substitutions. */
4000
4001 if (CONSTANT_P (src) || oprs_not_set_p (src, insn))
4002 set1 = set;
4003
4004 /* If the source of the set is anything except a register, then
4005 we have reached the end of the copy chain. */
4006 if (GET_CODE (src) != REG)
4007 break;
4008
4009 /* Follow the copy chain, ie start another iteration of the loop
4010 and see if we have an available copy into SRC. */
4011 regno = REGNO (src);
4012 }
4013
4014 /* SET1 holds the last set that was available and anticipatable at
4015 INSN. */
4016 return set1;
4017 }
4018
4019 /* Subroutine of cprop_insn that tries to propagate constants into
4020 JUMP_INSNS. INSN must be a conditional jump. FROM is what we will try to
4021 replace, SRC is the constant we will try to substitute for it. Returns
4022 nonzero if a change was made. We know INSN has just a SET. */
4023
4024 static int
4025 cprop_jump (bb, insn, from, src)
4026 rtx insn;
4027 rtx from;
4028 rtx src;
4029 basic_block bb;
4030 {
4031 rtx set = PATTERN (insn);
4032 rtx new = simplify_replace_rtx (SET_SRC (set), from, src);
4033
4034 /* If no simplification can be made, then try the next
4035 register. */
4036 if (rtx_equal_p (new, SET_SRC (set)))
4037 return 0;
4038
4039 /* If this is now a no-op leave it that way, but update LABEL_NUSED if
4040 necessary. */
4041 if (new == pc_rtx)
4042 {
4043 SET_SRC (set) = new;
4044
4045 if (JUMP_LABEL (insn) != 0)
4046 {
4047 --LABEL_NUSES (JUMP_LABEL (insn));
4048 JUMP_LABEL (insn) = NULL_RTX;
4049 }
4050 }
4051
4052 /* Otherwise, this must be a valid instruction. */
4053 else if (! validate_change (insn, &SET_SRC (set), new, 0))
4054 return 0;
4055
4056 /* If this has turned into an unconditional jump,
4057 then put a barrier after it so that the unreachable
4058 code will be deleted. */
4059 if (GET_CODE (SET_SRC (set)) == LABEL_REF)
4060 emit_barrier_after (insn);
4061
4062 run_jump_opt_after_gcse = 1;
4063
4064 const_prop_count++;
4065 if (gcse_file != NULL)
4066 {
4067 fprintf (gcse_file,
4068 "CONST-PROP: Replacing reg %d in insn %d with constant ",
4069 REGNO (from), INSN_UID (insn));
4070 print_rtl (gcse_file, src);
4071 fprintf (gcse_file, "\n");
4072 }
4073 purge_dead_edges (bb);
4074
4075 return 1;
4076 }
4077
4078 #ifdef HAVE_cc0
4079
4080 /* Subroutine of cprop_insn that tries to propagate constants into JUMP_INSNS
4081 for machines that have CC0. INSN is a single set that stores into CC0;
4082 the insn following it is a conditional jump. REG_USED is the use we will
4083 try to replace, SRC is the constant we will try to substitute for it.
4084 Returns nonzero if a change was made. */
4085
4086 static int
4087 cprop_cc0_jump (bb, insn, reg_used, src)
4088 basic_block bb;
4089 rtx insn;
4090 struct reg_use *reg_used;
4091 rtx src;
4092 {
4093 /* First substitute in the SET_SRC of INSN, then substitute that for
4094 CC0 in JUMP. */
4095 rtx jump = NEXT_INSN (insn);
4096 rtx new_src = simplify_replace_rtx (SET_SRC (PATTERN (insn)),
4097 reg_used->reg_rtx, src);
4098
4099 if (! cprop_jump (bb, jump, cc0_rtx, new_src))
4100 return 0;
4101
4102 /* If we succeeded, delete the cc0 setter. */
4103 delete_insn (insn);
4104
4105 return 1;
4106 }
4107 #endif
4108
4109 /* Perform constant and copy propagation on INSN.
4110 The result is non-zero if a change was made. */
4111
4112 static int
4113 cprop_insn (bb, insn, alter_jumps)
4114 basic_block bb;
4115 rtx insn;
4116 int alter_jumps;
4117 {
4118 struct reg_use *reg_used;
4119 int changed = 0;
4120 rtx note;
4121
4122 if (!INSN_P (insn))
4123 return 0;
4124
4125 reg_use_count = 0;
4126 note_uses (&PATTERN (insn), find_used_regs, NULL);
4127
4128 note = find_reg_equal_equiv_note (insn);
4129
4130 /* We may win even when propagating constants into notes. */
4131 if (note)
4132 find_used_regs (&XEXP (note, 0), NULL);
4133
4134 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
4135 reg_used++, reg_use_count--)
4136 {
4137 unsigned int regno = REGNO (reg_used->reg_rtx);
4138 rtx pat, src;
4139 struct expr *set;
4140
4141 /* Ignore registers created by GCSE.
4142 We do this because ... */
4143 if (regno >= max_gcse_regno)
4144 continue;
4145
4146 /* If the register has already been set in this block, there's
4147 nothing we can do. */
4148 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
4149 continue;
4150
4151 /* Find an assignment that sets reg_used and is available
4152 at the start of the block. */
4153 set = find_avail_set (regno, insn);
4154 if (! set)
4155 continue;
4156
4157 pat = set->expr;
4158 /* ??? We might be able to handle PARALLELs. Later. */
4159 if (GET_CODE (pat) != SET)
4160 abort ();
4161
4162 src = SET_SRC (pat);
4163
4164 /* Constant propagation. */
4165 if (GET_CODE (src) == CONST_INT || GET_CODE (src) == CONST_DOUBLE
4166 || GET_CODE (src) == SYMBOL_REF)
4167 {
4168 /* Handle normal insns first. */
4169 if (GET_CODE (insn) == INSN
4170 && try_replace_reg (reg_used->reg_rtx, src, insn))
4171 {
4172 changed = 1;
4173 const_prop_count++;
4174 if (gcse_file != NULL)
4175 {
4176 fprintf (gcse_file, "CONST-PROP: Replacing reg %d in ",
4177 regno);
4178 fprintf (gcse_file, "insn %d with constant ",
4179 INSN_UID (insn));
4180 print_rtl (gcse_file, src);
4181 fprintf (gcse_file, "\n");
4182 }
4183
4184 /* The original insn setting reg_used may or may not now be
4185 deletable. We leave the deletion to flow. */
4186 }
4187
4188 /* Try to propagate a CONST_INT into a conditional jump.
4189 We're pretty specific about what we will handle in this
4190 code, we can extend this as necessary over time.
4191
4192 Right now the insn in question must look like
4193 (set (pc) (if_then_else ...)) */
4194 else if (alter_jumps
4195 && GET_CODE (insn) == JUMP_INSN
4196 && condjump_p (insn)
4197 && ! simplejump_p (insn))
4198 changed |= cprop_jump (bb, insn, reg_used->reg_rtx, src);
4199
4200 #ifdef HAVE_cc0
4201 /* Similar code for machines that use a pair of CC0 setter and
4202 conditional jump insn. */
4203 else if (alter_jumps
4204 && GET_CODE (PATTERN (insn)) == SET
4205 && SET_DEST (PATTERN (insn)) == cc0_rtx
4206 && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
4207 && condjump_p (NEXT_INSN (insn))
4208 && ! simplejump_p (NEXT_INSN (insn))
4209 && cprop_cc0_jump (bb, insn, reg_used, src))
4210 {
4211 changed = 1;
4212 break;
4213 }
4214 #endif
4215 }
4216 else if (GET_CODE (src) == REG
4217 && REGNO (src) >= FIRST_PSEUDO_REGISTER
4218 && REGNO (src) != regno)
4219 {
4220 if (try_replace_reg (reg_used->reg_rtx, src, insn))
4221 {
4222 changed = 1;
4223 copy_prop_count++;
4224 if (gcse_file != NULL)
4225 {
4226 fprintf (gcse_file, "COPY-PROP: Replacing reg %d in insn %d",
4227 regno, INSN_UID (insn));
4228 fprintf (gcse_file, " with reg %d\n", REGNO (src));
4229 }
4230
4231 /* The original insn setting reg_used may or may not now be
4232 deletable. We leave the deletion to flow. */
4233 /* FIXME: If it turns out that the insn isn't deletable,
4234 then we may have unnecessarily extended register lifetimes
4235 and made things worse. */
4236 }
4237 }
4238 }
4239
4240 return changed;
4241 }
4242
4243 /* Forward propagate copies. This includes copies and constants. Return
4244 non-zero if a change was made. */
4245
4246 static int
4247 cprop (alter_jumps)
4248 int alter_jumps;
4249 {
4250 int bb, changed;
4251 rtx insn;
4252
4253 /* Note we start at block 1. */
4254
4255 changed = 0;
4256 for (bb = 1; bb < n_basic_blocks; bb++)
4257 {
4258 /* Reset tables used to keep track of what's still valid [since the
4259 start of the block]. */
4260 reset_opr_set_tables ();
4261
4262 for (insn = BLOCK_HEAD (bb);
4263 insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
4264 insn = NEXT_INSN (insn))
4265 if (INSN_P (insn))
4266 {
4267 changed |= cprop_insn (BASIC_BLOCK (bb), insn, alter_jumps);
4268
4269 /* Keep track of everything modified by this insn. */
4270 /* ??? Need to be careful w.r.t. mods done to INSN. Don't
4271 call mark_oprs_set if we turned the insn into a NOTE. */
4272 if (GET_CODE (insn) != NOTE)
4273 mark_oprs_set (insn);
4274 }
4275 }
4276
4277 if (gcse_file != NULL)
4278 fprintf (gcse_file, "\n");
4279
4280 return changed;
4281 }
4282
4283 /* Perform one copy/constant propagation pass.
4284 F is the first insn in the function.
4285 PASS is the pass count. */
4286
4287 static int
4288 one_cprop_pass (pass, alter_jumps)
4289 int pass;
4290 int alter_jumps;
4291 {
4292 int changed = 0;
4293
4294 const_prop_count = 0;
4295 copy_prop_count = 0;
4296
4297 alloc_set_hash_table (max_cuid);
4298 compute_set_hash_table ();
4299 if (gcse_file)
4300 dump_hash_table (gcse_file, "SET", set_hash_table, set_hash_table_size,
4301 n_sets);
4302 if (n_sets > 0)
4303 {
4304 alloc_cprop_mem (n_basic_blocks, n_sets);
4305 compute_cprop_data ();
4306 changed = cprop (alter_jumps);
4307 free_cprop_mem ();
4308 }
4309
4310 free_set_hash_table ();
4311
4312 if (gcse_file)
4313 {
4314 fprintf (gcse_file, "CPROP of %s, pass %d: %d bytes needed, ",
4315 current_function_name, pass, bytes_used);
4316 fprintf (gcse_file, "%d const props, %d copy props\n\n",
4317 const_prop_count, copy_prop_count);
4318 }
4319
4320 return changed;
4321 }
4322 \f
4323 /* Compute PRE+LCM working variables. */
4324
4325 /* Local properties of expressions. */
4326 /* Nonzero for expressions that are transparent in the block. */
4327 static sbitmap *transp;
4328
4329 /* Nonzero for expressions that are transparent at the end of the block.
4330 This is only zero for expressions killed by abnormal critical edge
4331 created by a calls. */
4332 static sbitmap *transpout;
4333
4334 /* Nonzero for expressions that are computed (available) in the block. */
4335 static sbitmap *comp;
4336
4337 /* Nonzero for expressions that are locally anticipatable in the block. */
4338 static sbitmap *antloc;
4339
4340 /* Nonzero for expressions where this block is an optimal computation
4341 point. */
4342 static sbitmap *pre_optimal;
4343
4344 /* Nonzero for expressions which are redundant in a particular block. */
4345 static sbitmap *pre_redundant;
4346
4347 /* Nonzero for expressions which should be inserted on a specific edge. */
4348 static sbitmap *pre_insert_map;
4349
4350 /* Nonzero for expressions which should be deleted in a specific block. */
4351 static sbitmap *pre_delete_map;
4352
4353 /* Contains the edge_list returned by pre_edge_lcm. */
4354 static struct edge_list *edge_list;
4355
4356 /* Redundant insns. */
4357 static sbitmap pre_redundant_insns;
4358
4359 /* Allocate vars used for PRE analysis. */
4360
4361 static void
4362 alloc_pre_mem (n_blocks, n_exprs)
4363 int n_blocks, n_exprs;
4364 {
4365 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4366 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4367 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4368
4369 pre_optimal = NULL;
4370 pre_redundant = NULL;
4371 pre_insert_map = NULL;
4372 pre_delete_map = NULL;
4373 ae_in = NULL;
4374 ae_out = NULL;
4375 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
4376
4377 /* pre_insert and pre_delete are allocated later. */
4378 }
4379
4380 /* Free vars used for PRE analysis. */
4381
4382 static void
4383 free_pre_mem ()
4384 {
4385 sbitmap_vector_free (transp);
4386 sbitmap_vector_free (comp);
4387
4388 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
4389
4390 if (pre_optimal)
4391 sbitmap_vector_free (pre_optimal);
4392 if (pre_redundant)
4393 sbitmap_vector_free (pre_redundant);
4394 if (pre_insert_map)
4395 sbitmap_vector_free (pre_insert_map);
4396 if (pre_delete_map)
4397 sbitmap_vector_free (pre_delete_map);
4398 if (ae_in)
4399 sbitmap_vector_free (ae_in);
4400 if (ae_out)
4401 sbitmap_vector_free (ae_out);
4402
4403 transp = comp = NULL;
4404 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
4405 ae_in = ae_out = NULL;
4406 }
4407
4408 /* Top level routine to do the dataflow analysis needed by PRE. */
4409
4410 static void
4411 compute_pre_data ()
4412 {
4413 sbitmap trapping_expr;
4414 int i;
4415 unsigned int ui;
4416
4417 compute_local_properties (transp, comp, antloc, 0);
4418 sbitmap_vector_zero (ae_kill, n_basic_blocks);
4419
4420 /* Collect expressions which might trap. */
4421 trapping_expr = sbitmap_alloc (n_exprs);
4422 sbitmap_zero (trapping_expr);
4423 for (ui = 0; ui < expr_hash_table_size; ui++)
4424 {
4425 struct expr *e;
4426 for (e = expr_hash_table[ui]; e != NULL; e = e->next_same_hash)
4427 if (may_trap_p (e->expr))
4428 SET_BIT (trapping_expr, e->bitmap_index);
4429 }
4430
4431 /* Compute ae_kill for each basic block using:
4432
4433 ~(TRANSP | COMP)
4434
4435 This is significantly faster than compute_ae_kill. */
4436
4437 for (i = 0; i < n_basic_blocks; i++)
4438 {
4439 edge e;
4440
4441 /* If the current block is the destination of an abnormal edge, we
4442 kill all trapping expressions because we won't be able to properly
4443 place the instruction on the edge. So make them neither
4444 anticipatable nor transparent. This is fairly conservative. */
4445 for (e = BASIC_BLOCK (i)->pred; e ; e = e->pred_next)
4446 if (e->flags & EDGE_ABNORMAL)
4447 {
4448 sbitmap_difference (antloc[i], antloc[i], trapping_expr);
4449 sbitmap_difference (transp[i], transp[i], trapping_expr);
4450 break;
4451 }
4452
4453 sbitmap_a_or_b (ae_kill[i], transp[i], comp[i]);
4454 sbitmap_not (ae_kill[i], ae_kill[i]);
4455 }
4456
4457 edge_list = pre_edge_lcm (gcse_file, n_exprs, transp, comp, antloc,
4458 ae_kill, &pre_insert_map, &pre_delete_map);
4459 sbitmap_vector_free (antloc);
4460 antloc = NULL;
4461 sbitmap_vector_free (ae_kill);
4462 ae_kill = NULL;
4463 free (trapping_expr);
4464 }
4465 \f
4466 /* PRE utilities */
4467
4468 /* Return non-zero if an occurrence of expression EXPR in OCCR_BB would reach
4469 block BB.
4470
4471 VISITED is a pointer to a working buffer for tracking which BB's have
4472 been visited. It is NULL for the top-level call.
4473
4474 We treat reaching expressions that go through blocks containing the same
4475 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
4476 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
4477 2 as not reaching. The intent is to improve the probability of finding
4478 only one reaching expression and to reduce register lifetimes by picking
4479 the closest such expression. */
4480
4481 static int
4482 pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited)
4483 basic_block occr_bb;
4484 struct expr *expr;
4485 basic_block bb;
4486 char *visited;
4487 {
4488 edge pred;
4489
4490 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
4491 {
4492 basic_block pred_bb = pred->src;
4493
4494 if (pred->src == ENTRY_BLOCK_PTR
4495 /* Has predecessor has already been visited? */
4496 || visited[pred_bb->index])
4497 ;/* Nothing to do. */
4498
4499 /* Does this predecessor generate this expression? */
4500 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
4501 {
4502 /* Is this the occurrence we're looking for?
4503 Note that there's only one generating occurrence per block
4504 so we just need to check the block number. */
4505 if (occr_bb == pred_bb)
4506 return 1;
4507
4508 visited[pred_bb->index] = 1;
4509 }
4510 /* Ignore this predecessor if it kills the expression. */
4511 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
4512 visited[pred_bb->index] = 1;
4513
4514 /* Neither gen nor kill. */
4515 else
4516 {
4517 visited[pred_bb->index] = 1;
4518 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
4519 return 1;
4520 }
4521 }
4522
4523 /* All paths have been checked. */
4524 return 0;
4525 }
4526
4527 /* The wrapper for pre_expr_reaches_here_work that ensures that any
4528 memory allocated for that function is returned. */
4529
4530 static int
4531 pre_expr_reaches_here_p (occr_bb, expr, bb)
4532 basic_block occr_bb;
4533 struct expr *expr;
4534 basic_block bb;
4535 {
4536 int rval;
4537 char *visited = (char *) xcalloc (n_basic_blocks, 1);
4538
4539 rval = pre_expr_reaches_here_p_work(occr_bb, expr, bb, visited);
4540
4541 free (visited);
4542 return rval;
4543 }
4544 \f
4545
4546 /* Given an expr, generate RTL which we can insert at the end of a BB,
4547 or on an edge. Set the block number of any insns generated to
4548 the value of BB. */
4549
4550 static rtx
4551 process_insert_insn (expr)
4552 struct expr *expr;
4553 {
4554 rtx reg = expr->reaching_reg;
4555 rtx exp = copy_rtx (expr->expr);
4556 rtx pat;
4557
4558 start_sequence ();
4559
4560 /* If the expression is something that's an operand, like a constant,
4561 just copy it to a register. */
4562 if (general_operand (exp, GET_MODE (reg)))
4563 emit_move_insn (reg, exp);
4564
4565 /* Otherwise, make a new insn to compute this expression and make sure the
4566 insn will be recognized (this also adds any needed CLOBBERs). Copy the
4567 expression to make sure we don't have any sharing issues. */
4568 else if (insn_invalid_p (emit_insn (gen_rtx_SET (VOIDmode, reg, exp))))
4569 abort ();
4570
4571 pat = gen_sequence ();
4572 end_sequence ();
4573
4574 return pat;
4575 }
4576
4577 /* Add EXPR to the end of basic block BB.
4578
4579 This is used by both the PRE and code hoisting.
4580
4581 For PRE, we want to verify that the expr is either transparent
4582 or locally anticipatable in the target block. This check makes
4583 no sense for code hoisting. */
4584
4585 static void
4586 insert_insn_end_bb (expr, bb, pre)
4587 struct expr *expr;
4588 basic_block bb;
4589 int pre;
4590 {
4591 rtx insn = bb->end;
4592 rtx new_insn;
4593 rtx reg = expr->reaching_reg;
4594 int regno = REGNO (reg);
4595 rtx pat;
4596 int i;
4597
4598 pat = process_insert_insn (expr);
4599
4600 /* If the last insn is a jump, insert EXPR in front [taking care to
4601 handle cc0, etc. properly]. */
4602
4603 if (GET_CODE (insn) == JUMP_INSN)
4604 {
4605 #ifdef HAVE_cc0
4606 rtx note;
4607 #endif
4608
4609 /* If this is a jump table, then we can't insert stuff here. Since
4610 we know the previous real insn must be the tablejump, we insert
4611 the new instruction just before the tablejump. */
4612 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
4613 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
4614 insn = prev_real_insn (insn);
4615
4616 #ifdef HAVE_cc0
4617 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
4618 if cc0 isn't set. */
4619 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
4620 if (note)
4621 insn = XEXP (note, 0);
4622 else
4623 {
4624 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
4625 if (maybe_cc0_setter
4626 && INSN_P (maybe_cc0_setter)
4627 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
4628 insn = maybe_cc0_setter;
4629 }
4630 #endif
4631 /* FIXME: What if something in cc0/jump uses value set in new insn? */
4632 new_insn = emit_insn_before (pat, insn);
4633 }
4634
4635 /* Likewise if the last insn is a call, as will happen in the presence
4636 of exception handling. */
4637 else if (GET_CODE (insn) == CALL_INSN)
4638 {
4639 /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
4640 we search backward and place the instructions before the first
4641 parameter is loaded. Do this for everyone for consistency and a
4642 presumtion that we'll get better code elsewhere as well.
4643
4644 It should always be the case that we can put these instructions
4645 anywhere in the basic block with performing PRE optimizations.
4646 Check this. */
4647
4648 if (pre
4649 && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
4650 && !TEST_BIT (transp[bb->index], expr->bitmap_index))
4651 abort ();
4652
4653 /* Since different machines initialize their parameter registers
4654 in different orders, assume nothing. Collect the set of all
4655 parameter registers. */
4656 insn = find_first_parameter_load (insn, bb->head);
4657
4658 /* If we found all the parameter loads, then we want to insert
4659 before the first parameter load.
4660
4661 If we did not find all the parameter loads, then we might have
4662 stopped on the head of the block, which could be a CODE_LABEL.
4663 If we inserted before the CODE_LABEL, then we would be putting
4664 the insn in the wrong basic block. In that case, put the insn
4665 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
4666 while (GET_CODE (insn) == CODE_LABEL
4667 || NOTE_INSN_BASIC_BLOCK_P (insn))
4668 insn = NEXT_INSN (insn);
4669
4670 new_insn = emit_insn_before (pat, insn);
4671 }
4672 else
4673 new_insn = emit_insn_after (pat, insn);
4674
4675 /* Keep block number table up to date.
4676 Note, PAT could be a multiple insn sequence, we have to make
4677 sure that each insn in the sequence is handled. */
4678 if (GET_CODE (pat) == SEQUENCE)
4679 {
4680 for (i = 0; i < XVECLEN (pat, 0); i++)
4681 {
4682 rtx insn = XVECEXP (pat, 0, i);
4683 if (INSN_P (insn))
4684 add_label_notes (PATTERN (insn), new_insn);
4685
4686 note_stores (PATTERN (insn), record_set_info, insn);
4687 }
4688 }
4689 else
4690 {
4691 add_label_notes (pat, new_insn);
4692
4693 /* Keep register set table up to date. */
4694 record_one_set (regno, new_insn);
4695 }
4696
4697 gcse_create_count++;
4698
4699 if (gcse_file)
4700 {
4701 fprintf (gcse_file, "PRE/HOIST: end of bb %d, insn %d, ",
4702 bb->index, INSN_UID (new_insn));
4703 fprintf (gcse_file, "copying expression %d to reg %d\n",
4704 expr->bitmap_index, regno);
4705 }
4706 }
4707
4708 /* Insert partially redundant expressions on edges in the CFG to make
4709 the expressions fully redundant. */
4710
4711 static int
4712 pre_edge_insert (edge_list, index_map)
4713 struct edge_list *edge_list;
4714 struct expr **index_map;
4715 {
4716 int e, i, j, num_edges, set_size, did_insert = 0;
4717 sbitmap *inserted;
4718
4719 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
4720 if it reaches any of the deleted expressions. */
4721
4722 set_size = pre_insert_map[0]->size;
4723 num_edges = NUM_EDGES (edge_list);
4724 inserted = sbitmap_vector_alloc (num_edges, n_exprs);
4725 sbitmap_vector_zero (inserted, num_edges);
4726
4727 for (e = 0; e < num_edges; e++)
4728 {
4729 int indx;
4730 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
4731
4732 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
4733 {
4734 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
4735
4736 for (j = indx; insert && j < n_exprs; j++, insert >>= 1)
4737 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
4738 {
4739 struct expr *expr = index_map[j];
4740 struct occr *occr;
4741
4742 /* Now look at each deleted occurrence of this expression. */
4743 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4744 {
4745 if (! occr->deleted_p)
4746 continue;
4747
4748 /* Insert this expression on this edge if if it would
4749 reach the deleted occurrence in BB. */
4750 if (!TEST_BIT (inserted[e], j))
4751 {
4752 rtx insn;
4753 edge eg = INDEX_EDGE (edge_list, e);
4754
4755 /* We can't insert anything on an abnormal and
4756 critical edge, so we insert the insn at the end of
4757 the previous block. There are several alternatives
4758 detailed in Morgans book P277 (sec 10.5) for
4759 handling this situation. This one is easiest for
4760 now. */
4761
4762 if ((eg->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
4763 insert_insn_end_bb (index_map[j], bb, 0);
4764 else
4765 {
4766 insn = process_insert_insn (index_map[j]);
4767 insert_insn_on_edge (insn, eg);
4768 }
4769
4770 if (gcse_file)
4771 {
4772 fprintf (gcse_file, "PRE/HOIST: edge (%d,%d), ",
4773 bb->index,
4774 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
4775 fprintf (gcse_file, "copy expression %d\n",
4776 expr->bitmap_index);
4777 }
4778
4779 update_ld_motion_stores (expr);
4780 SET_BIT (inserted[e], j);
4781 did_insert = 1;
4782 gcse_create_count++;
4783 }
4784 }
4785 }
4786 }
4787 }
4788
4789 sbitmap_vector_free (inserted);
4790 return did_insert;
4791 }
4792
4793 /* Copy the result of INSN to REG. INDX is the expression number. */
4794
4795 static void
4796 pre_insert_copy_insn (expr, insn)
4797 struct expr *expr;
4798 rtx insn;
4799 {
4800 rtx reg = expr->reaching_reg;
4801 int regno = REGNO (reg);
4802 int indx = expr->bitmap_index;
4803 rtx set = single_set (insn);
4804 rtx new_insn;
4805
4806 if (!set)
4807 abort ();
4808
4809 new_insn = emit_insn_after (gen_move_insn (reg, SET_DEST (set)), insn);
4810
4811 /* Keep register set table up to date. */
4812 record_one_set (regno, new_insn);
4813
4814 gcse_create_count++;
4815
4816 if (gcse_file)
4817 fprintf (gcse_file,
4818 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
4819 BLOCK_NUM (insn), INSN_UID (new_insn), indx,
4820 INSN_UID (insn), regno);
4821 update_ld_motion_stores (expr);
4822 }
4823
4824 /* Copy available expressions that reach the redundant expression
4825 to `reaching_reg'. */
4826
4827 static void
4828 pre_insert_copies ()
4829 {
4830 unsigned int i;
4831 struct expr *expr;
4832 struct occr *occr;
4833 struct occr *avail;
4834
4835 /* For each available expression in the table, copy the result to
4836 `reaching_reg' if the expression reaches a deleted one.
4837
4838 ??? The current algorithm is rather brute force.
4839 Need to do some profiling. */
4840
4841 for (i = 0; i < expr_hash_table_size; i++)
4842 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
4843 {
4844 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
4845 we don't want to insert a copy here because the expression may not
4846 really be redundant. So only insert an insn if the expression was
4847 deleted. This test also avoids further processing if the
4848 expression wasn't deleted anywhere. */
4849 if (expr->reaching_reg == NULL)
4850 continue;
4851
4852 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4853 {
4854 if (! occr->deleted_p)
4855 continue;
4856
4857 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
4858 {
4859 rtx insn = avail->insn;
4860
4861 /* No need to handle this one if handled already. */
4862 if (avail->copied_p)
4863 continue;
4864
4865 /* Don't handle this one if it's a redundant one. */
4866 if (TEST_BIT (pre_redundant_insns, INSN_CUID (insn)))
4867 continue;
4868
4869 /* Or if the expression doesn't reach the deleted one. */
4870 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
4871 expr,
4872 BLOCK_FOR_INSN (occr->insn)))
4873 continue;
4874
4875 /* Copy the result of avail to reaching_reg. */
4876 pre_insert_copy_insn (expr, insn);
4877 avail->copied_p = 1;
4878 }
4879 }
4880 }
4881 }
4882
4883 /* Delete redundant computations.
4884 Deletion is done by changing the insn to copy the `reaching_reg' of
4885 the expression into the result of the SET. It is left to later passes
4886 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
4887
4888 Returns non-zero if a change is made. */
4889
4890 static int
4891 pre_delete ()
4892 {
4893 unsigned int i;
4894 int changed;
4895 struct expr *expr;
4896 struct occr *occr;
4897
4898 changed = 0;
4899 for (i = 0; i < expr_hash_table_size; i++)
4900 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
4901 {
4902 int indx = expr->bitmap_index;
4903
4904 /* We only need to search antic_occr since we require
4905 ANTLOC != 0. */
4906
4907 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4908 {
4909 rtx insn = occr->insn;
4910 rtx set;
4911 basic_block bb = BLOCK_FOR_INSN (insn);
4912
4913 if (TEST_BIT (pre_delete_map[bb->index], indx))
4914 {
4915 set = single_set (insn);
4916 if (! set)
4917 abort ();
4918
4919 /* Create a pseudo-reg to store the result of reaching
4920 expressions into. Get the mode for the new pseudo from
4921 the mode of the original destination pseudo. */
4922 if (expr->reaching_reg == NULL)
4923 expr->reaching_reg
4924 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
4925
4926 /* In theory this should never fail since we're creating
4927 a reg->reg copy.
4928
4929 However, on the x86 some of the movXX patterns actually
4930 contain clobbers of scratch regs. This may cause the
4931 insn created by validate_change to not match any pattern
4932 and thus cause validate_change to fail. */
4933 if (validate_change (insn, &SET_SRC (set),
4934 expr->reaching_reg, 0))
4935 {
4936 occr->deleted_p = 1;
4937 SET_BIT (pre_redundant_insns, INSN_CUID (insn));
4938 changed = 1;
4939 gcse_subst_count++;
4940 }
4941
4942 if (gcse_file)
4943 {
4944 fprintf (gcse_file,
4945 "PRE: redundant insn %d (expression %d) in ",
4946 INSN_UID (insn), indx);
4947 fprintf (gcse_file, "bb %d, reaching reg is %d\n",
4948 bb->index, REGNO (expr->reaching_reg));
4949 }
4950 }
4951 }
4952 }
4953
4954 return changed;
4955 }
4956
4957 /* Perform GCSE optimizations using PRE.
4958 This is called by one_pre_gcse_pass after all the dataflow analysis
4959 has been done.
4960
4961 This is based on the original Morel-Renvoise paper Fred Chow's thesis, and
4962 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
4963 Compiler Design and Implementation.
4964
4965 ??? A new pseudo reg is created to hold the reaching expression. The nice
4966 thing about the classical approach is that it would try to use an existing
4967 reg. If the register can't be adequately optimized [i.e. we introduce
4968 reload problems], one could add a pass here to propagate the new register
4969 through the block.
4970
4971 ??? We don't handle single sets in PARALLELs because we're [currently] not
4972 able to copy the rest of the parallel when we insert copies to create full
4973 redundancies from partial redundancies. However, there's no reason why we
4974 can't handle PARALLELs in the cases where there are no partial
4975 redundancies. */
4976
4977 static int
4978 pre_gcse ()
4979 {
4980 unsigned int i;
4981 int did_insert, changed;
4982 struct expr **index_map;
4983 struct expr *expr;
4984
4985 /* Compute a mapping from expression number (`bitmap_index') to
4986 hash table entry. */
4987
4988 index_map = (struct expr **) xcalloc (n_exprs, sizeof (struct expr *));
4989 for (i = 0; i < expr_hash_table_size; i++)
4990 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
4991 index_map[expr->bitmap_index] = expr;
4992
4993 /* Reset bitmap used to track which insns are redundant. */
4994 pre_redundant_insns = sbitmap_alloc (max_cuid);
4995 sbitmap_zero (pre_redundant_insns);
4996
4997 /* Delete the redundant insns first so that
4998 - we know what register to use for the new insns and for the other
4999 ones with reaching expressions
5000 - we know which insns are redundant when we go to create copies */
5001
5002 changed = pre_delete ();
5003
5004 did_insert = pre_edge_insert (edge_list, index_map);
5005
5006 /* In other places with reaching expressions, copy the expression to the
5007 specially allocated pseudo-reg that reaches the redundant expr. */
5008 pre_insert_copies ();
5009 if (did_insert)
5010 {
5011 commit_edge_insertions ();
5012 changed = 1;
5013 }
5014
5015 free (index_map);
5016 free (pre_redundant_insns);
5017 return changed;
5018 }
5019
5020 /* Top level routine to perform one PRE GCSE pass.
5021
5022 Return non-zero if a change was made. */
5023
5024 static int
5025 one_pre_gcse_pass (pass)
5026 int pass;
5027 {
5028 int changed = 0;
5029
5030 gcse_subst_count = 0;
5031 gcse_create_count = 0;
5032
5033 alloc_expr_hash_table (max_cuid);
5034 add_noreturn_fake_exit_edges ();
5035 if (flag_gcse_lm)
5036 compute_ld_motion_mems ();
5037
5038 compute_expr_hash_table ();
5039 trim_ld_motion_mems ();
5040 if (gcse_file)
5041 dump_hash_table (gcse_file, "Expression", expr_hash_table,
5042 expr_hash_table_size, n_exprs);
5043
5044 if (n_exprs > 0)
5045 {
5046 alloc_pre_mem (n_basic_blocks, n_exprs);
5047 compute_pre_data ();
5048 changed |= pre_gcse ();
5049 free_edge_list (edge_list);
5050 free_pre_mem ();
5051 }
5052
5053 free_ldst_mems ();
5054 remove_fake_edges ();
5055 free_expr_hash_table ();
5056
5057 if (gcse_file)
5058 {
5059 fprintf (gcse_file, "\nPRE GCSE of %s, pass %d: %d bytes needed, ",
5060 current_function_name, pass, bytes_used);
5061 fprintf (gcse_file, "%d substs, %d insns created\n",
5062 gcse_subst_count, gcse_create_count);
5063 }
5064
5065 return changed;
5066 }
5067 \f
5068 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to INSN.
5069 If notes are added to an insn which references a CODE_LABEL, the
5070 LABEL_NUSES count is incremented. We have to add REG_LABEL notes,
5071 because the following loop optimization pass requires them. */
5072
5073 /* ??? This is very similar to the loop.c add_label_notes function. We
5074 could probably share code here. */
5075
5076 /* ??? If there was a jump optimization pass after gcse and before loop,
5077 then we would not need to do this here, because jump would add the
5078 necessary REG_LABEL notes. */
5079
5080 static void
5081 add_label_notes (x, insn)
5082 rtx x;
5083 rtx insn;
5084 {
5085 enum rtx_code code = GET_CODE (x);
5086 int i, j;
5087 const char *fmt;
5088
5089 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
5090 {
5091 /* This code used to ignore labels that referred to dispatch tables to
5092 avoid flow generating (slighly) worse code.
5093
5094 We no longer ignore such label references (see LABEL_REF handling in
5095 mark_jump_label for additional information). */
5096
5097 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
5098 REG_NOTES (insn));
5099 if (LABEL_P (XEXP (x, 0)))
5100 LABEL_NUSES (XEXP (x, 0))++;
5101 return;
5102 }
5103
5104 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
5105 {
5106 if (fmt[i] == 'e')
5107 add_label_notes (XEXP (x, i), insn);
5108 else if (fmt[i] == 'E')
5109 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5110 add_label_notes (XVECEXP (x, i, j), insn);
5111 }
5112 }
5113
5114 /* Compute transparent outgoing information for each block.
5115
5116 An expression is transparent to an edge unless it is killed by
5117 the edge itself. This can only happen with abnormal control flow,
5118 when the edge is traversed through a call. This happens with
5119 non-local labels and exceptions.
5120
5121 This would not be necessary if we split the edge. While this is
5122 normally impossible for abnormal critical edges, with some effort
5123 it should be possible with exception handling, since we still have
5124 control over which handler should be invoked. But due to increased
5125 EH table sizes, this may not be worthwhile. */
5126
5127 static void
5128 compute_transpout ()
5129 {
5130 int bb;
5131 unsigned int i;
5132 struct expr *expr;
5133
5134 sbitmap_vector_ones (transpout, n_basic_blocks);
5135
5136 for (bb = 0; bb < n_basic_blocks; ++bb)
5137 {
5138 /* Note that flow inserted a nop a the end of basic blocks that
5139 end in call instructions for reasons other than abnormal
5140 control flow. */
5141 if (GET_CODE (BLOCK_END (bb)) != CALL_INSN)
5142 continue;
5143
5144 for (i = 0; i < expr_hash_table_size; i++)
5145 for (expr = expr_hash_table[i]; expr ; expr = expr->next_same_hash)
5146 if (GET_CODE (expr->expr) == MEM)
5147 {
5148 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
5149 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
5150 continue;
5151
5152 /* ??? Optimally, we would use interprocedural alias
5153 analysis to determine if this mem is actually killed
5154 by this call. */
5155 RESET_BIT (transpout[bb], expr->bitmap_index);
5156 }
5157 }
5158 }
5159
5160 /* Removal of useless null pointer checks */
5161
5162 /* Called via note_stores. X is set by SETTER. If X is a register we must
5163 invalidate nonnull_local and set nonnull_killed. DATA is really a
5164 `null_pointer_info *'.
5165
5166 We ignore hard registers. */
5167
5168 static void
5169 invalidate_nonnull_info (x, setter, data)
5170 rtx x;
5171 rtx setter ATTRIBUTE_UNUSED;
5172 void *data;
5173 {
5174 unsigned int regno;
5175 struct null_pointer_info *npi = (struct null_pointer_info *) data;
5176
5177 while (GET_CODE (x) == SUBREG)
5178 x = SUBREG_REG (x);
5179
5180 /* Ignore anything that is not a register or is a hard register. */
5181 if (GET_CODE (x) != REG
5182 || REGNO (x) < npi->min_reg
5183 || REGNO (x) >= npi->max_reg)
5184 return;
5185
5186 regno = REGNO (x) - npi->min_reg;
5187
5188 RESET_BIT (npi->nonnull_local[npi->current_block], regno);
5189 SET_BIT (npi->nonnull_killed[npi->current_block], regno);
5190 }
5191
5192 /* Do null-pointer check elimination for the registers indicated in
5193 NPI. NONNULL_AVIN and NONNULL_AVOUT are pre-allocated sbitmaps;
5194 they are not our responsibility to free. */
5195
5196 static void
5197 delete_null_pointer_checks_1 (delete_list, block_reg, nonnull_avin,
5198 nonnull_avout, npi)
5199 varray_type *delete_list;
5200 unsigned int *block_reg;
5201 sbitmap *nonnull_avin;
5202 sbitmap *nonnull_avout;
5203 struct null_pointer_info *npi;
5204 {
5205 int bb;
5206 int current_block;
5207 sbitmap *nonnull_local = npi->nonnull_local;
5208 sbitmap *nonnull_killed = npi->nonnull_killed;
5209
5210 /* Compute local properties, nonnull and killed. A register will have
5211 the nonnull property if at the end of the current block its value is
5212 known to be nonnull. The killed property indicates that somewhere in
5213 the block any information we had about the register is killed.
5214
5215 Note that a register can have both properties in a single block. That
5216 indicates that it's killed, then later in the block a new value is
5217 computed. */
5218 sbitmap_vector_zero (nonnull_local, n_basic_blocks);
5219 sbitmap_vector_zero (nonnull_killed, n_basic_blocks);
5220
5221 for (current_block = 0; current_block < n_basic_blocks; current_block++)
5222 {
5223 rtx insn, stop_insn;
5224
5225 /* Set the current block for invalidate_nonnull_info. */
5226 npi->current_block = current_block;
5227
5228 /* Scan each insn in the basic block looking for memory references and
5229 register sets. */
5230 stop_insn = NEXT_INSN (BLOCK_END (current_block));
5231 for (insn = BLOCK_HEAD (current_block);
5232 insn != stop_insn;
5233 insn = NEXT_INSN (insn))
5234 {
5235 rtx set;
5236 rtx reg;
5237
5238 /* Ignore anything that is not a normal insn. */
5239 if (! INSN_P (insn))
5240 continue;
5241
5242 /* Basically ignore anything that is not a simple SET. We do have
5243 to make sure to invalidate nonnull_local and set nonnull_killed
5244 for such insns though. */
5245 set = single_set (insn);
5246 if (!set)
5247 {
5248 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5249 continue;
5250 }
5251
5252 /* See if we've got a usable memory load. We handle it first
5253 in case it uses its address register as a dest (which kills
5254 the nonnull property). */
5255 if (GET_CODE (SET_SRC (set)) == MEM
5256 && GET_CODE ((reg = XEXP (SET_SRC (set), 0))) == REG
5257 && REGNO (reg) >= npi->min_reg
5258 && REGNO (reg) < npi->max_reg)
5259 SET_BIT (nonnull_local[current_block],
5260 REGNO (reg) - npi->min_reg);
5261
5262 /* Now invalidate stuff clobbered by this insn. */
5263 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5264
5265 /* And handle stores, we do these last since any sets in INSN can
5266 not kill the nonnull property if it is derived from a MEM
5267 appearing in a SET_DEST. */
5268 if (GET_CODE (SET_DEST (set)) == MEM
5269 && GET_CODE ((reg = XEXP (SET_DEST (set), 0))) == REG
5270 && REGNO (reg) >= npi->min_reg
5271 && REGNO (reg) < npi->max_reg)
5272 SET_BIT (nonnull_local[current_block],
5273 REGNO (reg) - npi->min_reg);
5274 }
5275 }
5276
5277 /* Now compute global properties based on the local properties. This
5278 is a classic global availablity algorithm. */
5279 compute_available (nonnull_local, nonnull_killed,
5280 nonnull_avout, nonnull_avin);
5281
5282 /* Now look at each bb and see if it ends with a compare of a value
5283 against zero. */
5284 for (bb = 0; bb < n_basic_blocks; bb++)
5285 {
5286 rtx last_insn = BLOCK_END (bb);
5287 rtx condition, earliest;
5288 int compare_and_branch;
5289
5290 /* Since MIN_REG is always at least FIRST_PSEUDO_REGISTER, and
5291 since BLOCK_REG[BB] is zero if this block did not end with a
5292 comparison against zero, this condition works. */
5293 if (block_reg[bb] < npi->min_reg
5294 || block_reg[bb] >= npi->max_reg)
5295 continue;
5296
5297 /* LAST_INSN is a conditional jump. Get its condition. */
5298 condition = get_condition (last_insn, &earliest);
5299
5300 /* If we can't determine the condition then skip. */
5301 if (! condition)
5302 continue;
5303
5304 /* Is the register known to have a nonzero value? */
5305 if (!TEST_BIT (nonnull_avout[bb], block_reg[bb] - npi->min_reg))
5306 continue;
5307
5308 /* Try to compute whether the compare/branch at the loop end is one or
5309 two instructions. */
5310 if (earliest == last_insn)
5311 compare_and_branch = 1;
5312 else if (earliest == prev_nonnote_insn (last_insn))
5313 compare_and_branch = 2;
5314 else
5315 continue;
5316
5317 /* We know the register in this comparison is nonnull at exit from
5318 this block. We can optimize this comparison. */
5319 if (GET_CODE (condition) == NE)
5320 {
5321 rtx new_jump;
5322
5323 new_jump = emit_jump_insn_before (gen_jump (JUMP_LABEL (last_insn)),
5324 last_insn);
5325 JUMP_LABEL (new_jump) = JUMP_LABEL (last_insn);
5326 LABEL_NUSES (JUMP_LABEL (new_jump))++;
5327 emit_barrier_after (new_jump);
5328 }
5329 if (!*delete_list)
5330 VARRAY_RTX_INIT (*delete_list, 10, "delete_list");
5331
5332 VARRAY_PUSH_RTX (*delete_list, last_insn);
5333 if (compare_and_branch == 2)
5334 VARRAY_PUSH_RTX (*delete_list, earliest);
5335
5336 /* Don't check this block again. (Note that BLOCK_END is
5337 invalid here; we deleted the last instruction in the
5338 block.) */
5339 block_reg[bb] = 0;
5340 }
5341 }
5342
5343 /* Find EQ/NE comparisons against zero which can be (indirectly) evaluated
5344 at compile time.
5345
5346 This is conceptually similar to global constant/copy propagation and
5347 classic global CSE (it even uses the same dataflow equations as cprop).
5348
5349 If a register is used as memory address with the form (mem (reg)), then we
5350 know that REG can not be zero at that point in the program. Any instruction
5351 which sets REG "kills" this property.
5352
5353 So, if every path leading to a conditional branch has an available memory
5354 reference of that form, then we know the register can not have the value
5355 zero at the conditional branch.
5356
5357 So we merely need to compute the local properies and propagate that data
5358 around the cfg, then optimize where possible.
5359
5360 We run this pass two times. Once before CSE, then again after CSE. This
5361 has proven to be the most profitable approach. It is rare for new
5362 optimization opportunities of this nature to appear after the first CSE
5363 pass.
5364
5365 This could probably be integrated with global cprop with a little work. */
5366
5367 void
5368 delete_null_pointer_checks (f)
5369 rtx f ATTRIBUTE_UNUSED;
5370 {
5371 sbitmap *nonnull_avin, *nonnull_avout;
5372 unsigned int *block_reg;
5373 varray_type delete_list = NULL;
5374 int bb;
5375 int reg;
5376 int regs_per_pass;
5377 int max_reg;
5378 unsigned int i;
5379 struct null_pointer_info npi;
5380
5381 /* If we have only a single block, then there's nothing to do. */
5382 if (n_basic_blocks <= 1)
5383 return;
5384
5385 /* Trying to perform global optimizations on flow graphs which have
5386 a high connectivity will take a long time and is unlikely to be
5387 particularly useful.
5388
5389 In normal circumstances a cfg should have about twice as many edges
5390 as blocks. But we do not want to punish small functions which have
5391 a couple switch statements. So we require a relatively large number
5392 of basic blocks and the ratio of edges to blocks to be high. */
5393 if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
5394 return;
5395
5396 /* We need four bitmaps, each with a bit for each register in each
5397 basic block. */
5398 max_reg = max_reg_num ();
5399 regs_per_pass = get_bitmap_width (4, n_basic_blocks, max_reg);
5400
5401 /* Allocate bitmaps to hold local and global properties. */
5402 npi.nonnull_local = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5403 npi.nonnull_killed = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5404 nonnull_avin = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5405 nonnull_avout = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5406
5407 /* Go through the basic blocks, seeing whether or not each block
5408 ends with a conditional branch whose condition is a comparison
5409 against zero. Record the register compared in BLOCK_REG. */
5410 block_reg = (unsigned int *) xcalloc (n_basic_blocks, sizeof (int));
5411 for (bb = 0; bb < n_basic_blocks; bb++)
5412 {
5413 rtx last_insn = BLOCK_END (bb);
5414 rtx condition, earliest, reg;
5415
5416 /* We only want conditional branches. */
5417 if (GET_CODE (last_insn) != JUMP_INSN
5418 || !any_condjump_p (last_insn)
5419 || !onlyjump_p (last_insn))
5420 continue;
5421
5422 /* LAST_INSN is a conditional jump. Get its condition. */
5423 condition = get_condition (last_insn, &earliest);
5424
5425 /* If we were unable to get the condition, or it is not an equality
5426 comparison against zero then there's nothing we can do. */
5427 if (!condition
5428 || (GET_CODE (condition) != NE && GET_CODE (condition) != EQ)
5429 || GET_CODE (XEXP (condition, 1)) != CONST_INT
5430 || (XEXP (condition, 1)
5431 != CONST0_RTX (GET_MODE (XEXP (condition, 0)))))
5432 continue;
5433
5434 /* We must be checking a register against zero. */
5435 reg = XEXP (condition, 0);
5436 if (GET_CODE (reg) != REG)
5437 continue;
5438
5439 block_reg[bb] = REGNO (reg);
5440 }
5441
5442 /* Go through the algorithm for each block of registers. */
5443 for (reg = FIRST_PSEUDO_REGISTER; reg < max_reg; reg += regs_per_pass)
5444 {
5445 npi.min_reg = reg;
5446 npi.max_reg = MIN (reg + regs_per_pass, max_reg);
5447 delete_null_pointer_checks_1 (&delete_list, block_reg, nonnull_avin,
5448 nonnull_avout, &npi);
5449 }
5450
5451 /* Now delete the instructions all at once. This breaks the CFG. */
5452 if (delete_list)
5453 {
5454 for (i = 0; i < VARRAY_ACTIVE_SIZE (delete_list); i++)
5455 delete_related_insns (VARRAY_RTX (delete_list, i));
5456 VARRAY_FREE (delete_list);
5457 }
5458
5459 /* Free the table of registers compared at the end of every block. */
5460 free (block_reg);
5461
5462 /* Free bitmaps. */
5463 sbitmap_vector_free (npi.nonnull_local);
5464 sbitmap_vector_free (npi.nonnull_killed);
5465 sbitmap_vector_free (nonnull_avin);
5466 sbitmap_vector_free (nonnull_avout);
5467 }
5468
5469 /* Code Hoisting variables and subroutines. */
5470
5471 /* Very busy expressions. */
5472 static sbitmap *hoist_vbein;
5473 static sbitmap *hoist_vbeout;
5474
5475 /* Hoistable expressions. */
5476 static sbitmap *hoist_exprs;
5477
5478 /* Dominator bitmaps. */
5479 static sbitmap *dominators;
5480
5481 /* ??? We could compute post dominators and run this algorithm in
5482 reverse to to perform tail merging, doing so would probably be
5483 more effective than the tail merging code in jump.c.
5484
5485 It's unclear if tail merging could be run in parallel with
5486 code hoisting. It would be nice. */
5487
5488 /* Allocate vars used for code hoisting analysis. */
5489
5490 static void
5491 alloc_code_hoist_mem (n_blocks, n_exprs)
5492 int n_blocks, n_exprs;
5493 {
5494 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
5495 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
5496 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
5497
5498 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
5499 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
5500 hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
5501 transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
5502
5503 dominators = sbitmap_vector_alloc (n_blocks, n_blocks);
5504 }
5505
5506 /* Free vars used for code hoisting analysis. */
5507
5508 static void
5509 free_code_hoist_mem ()
5510 {
5511 sbitmap_vector_free (antloc);
5512 sbitmap_vector_free (transp);
5513 sbitmap_vector_free (comp);
5514
5515 sbitmap_vector_free (hoist_vbein);
5516 sbitmap_vector_free (hoist_vbeout);
5517 sbitmap_vector_free (hoist_exprs);
5518 sbitmap_vector_free (transpout);
5519
5520 sbitmap_vector_free (dominators);
5521 }
5522
5523 /* Compute the very busy expressions at entry/exit from each block.
5524
5525 An expression is very busy if all paths from a given point
5526 compute the expression. */
5527
5528 static void
5529 compute_code_hoist_vbeinout ()
5530 {
5531 int bb, changed, passes;
5532
5533 sbitmap_vector_zero (hoist_vbeout, n_basic_blocks);
5534 sbitmap_vector_zero (hoist_vbein, n_basic_blocks);
5535
5536 passes = 0;
5537 changed = 1;
5538
5539 while (changed)
5540 {
5541 changed = 0;
5542
5543 /* We scan the blocks in the reverse order to speed up
5544 the convergence. */
5545 for (bb = n_basic_blocks - 1; bb >= 0; bb--)
5546 {
5547 changed |= sbitmap_a_or_b_and_c (hoist_vbein[bb], antloc[bb],
5548 hoist_vbeout[bb], transp[bb]);
5549 if (bb != n_basic_blocks - 1)
5550 sbitmap_intersection_of_succs (hoist_vbeout[bb], hoist_vbein, bb);
5551 }
5552
5553 passes++;
5554 }
5555
5556 if (gcse_file)
5557 fprintf (gcse_file, "hoisting vbeinout computation: %d passes\n", passes);
5558 }
5559
5560 /* Top level routine to do the dataflow analysis needed by code hoisting. */
5561
5562 static void
5563 compute_code_hoist_data ()
5564 {
5565 compute_local_properties (transp, comp, antloc, 0);
5566 compute_transpout ();
5567 compute_code_hoist_vbeinout ();
5568 calculate_dominance_info (NULL, dominators, CDI_DOMINATORS);
5569 if (gcse_file)
5570 fprintf (gcse_file, "\n");
5571 }
5572
5573 /* Determine if the expression identified by EXPR_INDEX would
5574 reach BB unimpared if it was placed at the end of EXPR_BB.
5575
5576 It's unclear exactly what Muchnick meant by "unimpared". It seems
5577 to me that the expression must either be computed or transparent in
5578 *every* block in the path(s) from EXPR_BB to BB. Any other definition
5579 would allow the expression to be hoisted out of loops, even if
5580 the expression wasn't a loop invariant.
5581
5582 Contrast this to reachability for PRE where an expression is
5583 considered reachable if *any* path reaches instead of *all*
5584 paths. */
5585
5586 static int
5587 hoist_expr_reaches_here_p (expr_bb, expr_index, bb, visited)
5588 basic_block expr_bb;
5589 int expr_index;
5590 basic_block bb;
5591 char *visited;
5592 {
5593 edge pred;
5594 int visited_allocated_locally = 0;
5595
5596
5597 if (visited == NULL)
5598 {
5599 visited_allocated_locally = 1;
5600 visited = xcalloc (n_basic_blocks, 1);
5601 }
5602
5603 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
5604 {
5605 basic_block pred_bb = pred->src;
5606
5607 if (pred->src == ENTRY_BLOCK_PTR)
5608 break;
5609 else if (visited[pred_bb->index])
5610 continue;
5611
5612 /* Does this predecessor generate this expression? */
5613 else if (TEST_BIT (comp[pred_bb->index], expr_index))
5614 break;
5615 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
5616 break;
5617
5618 /* Not killed. */
5619 else
5620 {
5621 visited[pred_bb->index] = 1;
5622 if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
5623 pred_bb, visited))
5624 break;
5625 }
5626 }
5627 if (visited_allocated_locally)
5628 free (visited);
5629
5630 return (pred == NULL);
5631 }
5632 \f
5633 /* Actually perform code hoisting. */
5634
5635 static void
5636 hoist_code ()
5637 {
5638 int bb, dominated;
5639 unsigned int i;
5640 struct expr **index_map;
5641 struct expr *expr;
5642
5643 sbitmap_vector_zero (hoist_exprs, n_basic_blocks);
5644
5645 /* Compute a mapping from expression number (`bitmap_index') to
5646 hash table entry. */
5647
5648 index_map = (struct expr **) xcalloc (n_exprs, sizeof (struct expr *));
5649 for (i = 0; i < expr_hash_table_size; i++)
5650 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
5651 index_map[expr->bitmap_index] = expr;
5652
5653 /* Walk over each basic block looking for potentially hoistable
5654 expressions, nothing gets hoisted from the entry block. */
5655 for (bb = 0; bb < n_basic_blocks; bb++)
5656 {
5657 int found = 0;
5658 int insn_inserted_p;
5659
5660 /* Examine each expression that is very busy at the exit of this
5661 block. These are the potentially hoistable expressions. */
5662 for (i = 0; i < hoist_vbeout[bb]->n_bits; i++)
5663 {
5664 int hoistable = 0;
5665
5666 if (TEST_BIT (hoist_vbeout[bb], i) && TEST_BIT (transpout[bb], i))
5667 {
5668 /* We've found a potentially hoistable expression, now
5669 we look at every block BB dominates to see if it
5670 computes the expression. */
5671 for (dominated = 0; dominated < n_basic_blocks; dominated++)
5672 {
5673 /* Ignore self dominance. */
5674 if (bb == dominated
5675 || ! TEST_BIT (dominators[dominated], bb))
5676 continue;
5677
5678 /* We've found a dominated block, now see if it computes
5679 the busy expression and whether or not moving that
5680 expression to the "beginning" of that block is safe. */
5681 if (!TEST_BIT (antloc[dominated], i))
5682 continue;
5683
5684 /* Note if the expression would reach the dominated block
5685 unimpared if it was placed at the end of BB.
5686
5687 Keep track of how many times this expression is hoistable
5688 from a dominated block into BB. */
5689 if (hoist_expr_reaches_here_p (BASIC_BLOCK (bb), i,
5690 BASIC_BLOCK (dominated), NULL))
5691 hoistable++;
5692 }
5693
5694 /* If we found more than one hoistable occurrence of this
5695 expression, then note it in the bitmap of expressions to
5696 hoist. It makes no sense to hoist things which are computed
5697 in only one BB, and doing so tends to pessimize register
5698 allocation. One could increase this value to try harder
5699 to avoid any possible code expansion due to register
5700 allocation issues; however experiments have shown that
5701 the vast majority of hoistable expressions are only movable
5702 from two successors, so raising this threshhold is likely
5703 to nullify any benefit we get from code hoisting. */
5704 if (hoistable > 1)
5705 {
5706 SET_BIT (hoist_exprs[bb], i);
5707 found = 1;
5708 }
5709 }
5710 }
5711
5712 /* If we found nothing to hoist, then quit now. */
5713 if (! found)
5714 continue;
5715
5716 /* Loop over all the hoistable expressions. */
5717 for (i = 0; i < hoist_exprs[bb]->n_bits; i++)
5718 {
5719 /* We want to insert the expression into BB only once, so
5720 note when we've inserted it. */
5721 insn_inserted_p = 0;
5722
5723 /* These tests should be the same as the tests above. */
5724 if (TEST_BIT (hoist_vbeout[bb], i))
5725 {
5726 /* We've found a potentially hoistable expression, now
5727 we look at every block BB dominates to see if it
5728 computes the expression. */
5729 for (dominated = 0; dominated < n_basic_blocks; dominated++)
5730 {
5731 /* Ignore self dominance. */
5732 if (bb == dominated
5733 || ! TEST_BIT (dominators[dominated], bb))
5734 continue;
5735
5736 /* We've found a dominated block, now see if it computes
5737 the busy expression and whether or not moving that
5738 expression to the "beginning" of that block is safe. */
5739 if (!TEST_BIT (antloc[dominated], i))
5740 continue;
5741
5742 /* The expression is computed in the dominated block and
5743 it would be safe to compute it at the start of the
5744 dominated block. Now we have to determine if the
5745 expression would reach the dominated block if it was
5746 placed at the end of BB. */
5747 if (hoist_expr_reaches_here_p (BASIC_BLOCK (bb), i,
5748 BASIC_BLOCK (dominated), NULL))
5749 {
5750 struct expr *expr = index_map[i];
5751 struct occr *occr = expr->antic_occr;
5752 rtx insn;
5753 rtx set;
5754
5755 /* Find the right occurrence of this expression. */
5756 while (BLOCK_NUM (occr->insn) != dominated && occr)
5757 occr = occr->next;
5758
5759 /* Should never happen. */
5760 if (!occr)
5761 abort ();
5762
5763 insn = occr->insn;
5764
5765 set = single_set (insn);
5766 if (! set)
5767 abort ();
5768
5769 /* Create a pseudo-reg to store the result of reaching
5770 expressions into. Get the mode for the new pseudo
5771 from the mode of the original destination pseudo. */
5772 if (expr->reaching_reg == NULL)
5773 expr->reaching_reg
5774 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
5775
5776 /* In theory this should never fail since we're creating
5777 a reg->reg copy.
5778
5779 However, on the x86 some of the movXX patterns
5780 actually contain clobbers of scratch regs. This may
5781 cause the insn created by validate_change to not
5782 match any pattern and thus cause validate_change to
5783 fail. */
5784 if (validate_change (insn, &SET_SRC (set),
5785 expr->reaching_reg, 0))
5786 {
5787 occr->deleted_p = 1;
5788 if (!insn_inserted_p)
5789 {
5790 insert_insn_end_bb (index_map[i],
5791 BASIC_BLOCK (bb), 0);
5792 insn_inserted_p = 1;
5793 }
5794 }
5795 }
5796 }
5797 }
5798 }
5799 }
5800
5801 free (index_map);
5802 }
5803
5804 /* Top level routine to perform one code hoisting (aka unification) pass
5805
5806 Return non-zero if a change was made. */
5807
5808 static int
5809 one_code_hoisting_pass ()
5810 {
5811 int changed = 0;
5812
5813 alloc_expr_hash_table (max_cuid);
5814 compute_expr_hash_table ();
5815 if (gcse_file)
5816 dump_hash_table (gcse_file, "Code Hosting Expressions", expr_hash_table,
5817 expr_hash_table_size, n_exprs);
5818
5819 if (n_exprs > 0)
5820 {
5821 alloc_code_hoist_mem (n_basic_blocks, n_exprs);
5822 compute_code_hoist_data ();
5823 hoist_code ();
5824 free_code_hoist_mem ();
5825 }
5826
5827 free_expr_hash_table ();
5828
5829 return changed;
5830 }
5831 \f
5832 /* Here we provide the things required to do store motion towards
5833 the exit. In order for this to be effective, gcse also needed to
5834 be taught how to move a load when it is kill only by a store to itself.
5835
5836 int i;
5837 float a[10];
5838
5839 void foo(float scale)
5840 {
5841 for (i=0; i<10; i++)
5842 a[i] *= scale;
5843 }
5844
5845 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
5846 the load out since its live around the loop, and stored at the bottom
5847 of the loop.
5848
5849 The 'Load Motion' referred to and implemented in this file is
5850 an enhancement to gcse which when using edge based lcm, recognizes
5851 this situation and allows gcse to move the load out of the loop.
5852
5853 Once gcse has hoisted the load, store motion can then push this
5854 load towards the exit, and we end up with no loads or stores of 'i'
5855 in the loop. */
5856
5857 /* This will search the ldst list for a matching expression. If it
5858 doesn't find one, we create one and initialize it. */
5859
5860 static struct ls_expr *
5861 ldst_entry (x)
5862 rtx x;
5863 {
5864 struct ls_expr * ptr;
5865
5866 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
5867 if (expr_equiv_p (ptr->pattern, x))
5868 break;
5869
5870 if (!ptr)
5871 {
5872 ptr = (struct ls_expr *) xmalloc (sizeof (struct ls_expr));
5873
5874 ptr->next = pre_ldst_mems;
5875 ptr->expr = NULL;
5876 ptr->pattern = x;
5877 ptr->loads = NULL_RTX;
5878 ptr->stores = NULL_RTX;
5879 ptr->reaching_reg = NULL_RTX;
5880 ptr->invalid = 0;
5881 ptr->index = 0;
5882 ptr->hash_index = 0;
5883 pre_ldst_mems = ptr;
5884 }
5885
5886 return ptr;
5887 }
5888
5889 /* Free up an individual ldst entry. */
5890
5891 static void
5892 free_ldst_entry (ptr)
5893 struct ls_expr * ptr;
5894 {
5895 free_INSN_LIST_list (& ptr->loads);
5896 free_INSN_LIST_list (& ptr->stores);
5897
5898 free (ptr);
5899 }
5900
5901 /* Free up all memory associated with the ldst list. */
5902
5903 static void
5904 free_ldst_mems ()
5905 {
5906 while (pre_ldst_mems)
5907 {
5908 struct ls_expr * tmp = pre_ldst_mems;
5909
5910 pre_ldst_mems = pre_ldst_mems->next;
5911
5912 free_ldst_entry (tmp);
5913 }
5914
5915 pre_ldst_mems = NULL;
5916 }
5917
5918 /* Dump debugging info about the ldst list. */
5919
5920 static void
5921 print_ldst_list (file)
5922 FILE * file;
5923 {
5924 struct ls_expr * ptr;
5925
5926 fprintf (file, "LDST list: \n");
5927
5928 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
5929 {
5930 fprintf (file, " Pattern (%3d): ", ptr->index);
5931
5932 print_rtl (file, ptr->pattern);
5933
5934 fprintf (file, "\n Loads : ");
5935
5936 if (ptr->loads)
5937 print_rtl (file, ptr->loads);
5938 else
5939 fprintf (file, "(nil)");
5940
5941 fprintf (file, "\n Stores : ");
5942
5943 if (ptr->stores)
5944 print_rtl (file, ptr->stores);
5945 else
5946 fprintf (file, "(nil)");
5947
5948 fprintf (file, "\n\n");
5949 }
5950
5951 fprintf (file, "\n");
5952 }
5953
5954 /* Returns 1 if X is in the list of ldst only expressions. */
5955
5956 static struct ls_expr *
5957 find_rtx_in_ldst (x)
5958 rtx x;
5959 {
5960 struct ls_expr * ptr;
5961
5962 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
5963 if (expr_equiv_p (ptr->pattern, x) && ! ptr->invalid)
5964 return ptr;
5965
5966 return NULL;
5967 }
5968
5969 /* Assign each element of the list of mems a monotonically increasing value. */
5970
5971 static int
5972 enumerate_ldsts ()
5973 {
5974 struct ls_expr * ptr;
5975 int n = 0;
5976
5977 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
5978 ptr->index = n++;
5979
5980 return n;
5981 }
5982
5983 /* Return first item in the list. */
5984
5985 static inline struct ls_expr *
5986 first_ls_expr ()
5987 {
5988 return pre_ldst_mems;
5989 }
5990
5991 /* Return the next item in ther list after the specified one. */
5992
5993 static inline struct ls_expr *
5994 next_ls_expr (ptr)
5995 struct ls_expr * ptr;
5996 {
5997 return ptr->next;
5998 }
5999 \f
6000 /* Load Motion for loads which only kill themselves. */
6001
6002 /* Return true if x is a simple MEM operation, with no registers or
6003 side effects. These are the types of loads we consider for the
6004 ld_motion list, otherwise we let the usual aliasing take care of it. */
6005
6006 static int
6007 simple_mem (x)
6008 rtx x;
6009 {
6010 if (GET_CODE (x) != MEM)
6011 return 0;
6012
6013 if (MEM_VOLATILE_P (x))
6014 return 0;
6015
6016 if (GET_MODE (x) == BLKmode)
6017 return 0;
6018
6019 if (!rtx_varies_p (XEXP (x, 0), 0))
6020 return 1;
6021
6022 return 0;
6023 }
6024
6025 /* Make sure there isn't a buried reference in this pattern anywhere.
6026 If there is, invalidate the entry for it since we're not capable
6027 of fixing it up just yet.. We have to be sure we know about ALL
6028 loads since the aliasing code will allow all entries in the
6029 ld_motion list to not-alias itself. If we miss a load, we will get
6030 the wrong value since gcse might common it and we won't know to
6031 fix it up. */
6032
6033 static void
6034 invalidate_any_buried_refs (x)
6035 rtx x;
6036 {
6037 const char * fmt;
6038 int i,j;
6039 struct ls_expr * ptr;
6040
6041 /* Invalidate it in the list. */
6042 if (GET_CODE (x) == MEM && simple_mem (x))
6043 {
6044 ptr = ldst_entry (x);
6045 ptr->invalid = 1;
6046 }
6047
6048 /* Recursively process the insn. */
6049 fmt = GET_RTX_FORMAT (GET_CODE (x));
6050
6051 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6052 {
6053 if (fmt[i] == 'e')
6054 invalidate_any_buried_refs (XEXP (x, i));
6055 else if (fmt[i] == 'E')
6056 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6057 invalidate_any_buried_refs (XVECEXP (x, i, j));
6058 }
6059 }
6060
6061 /* Find all the 'simple' MEMs which are used in LOADs and STORES. Simple
6062 being defined as MEM loads and stores to symbols, with no
6063 side effects and no registers in the expression. If there are any
6064 uses/defs which don't match this criteria, it is invalidated and
6065 trimmed out later. */
6066
6067 static void
6068 compute_ld_motion_mems ()
6069 {
6070 struct ls_expr * ptr;
6071 int bb;
6072 rtx insn;
6073
6074 pre_ldst_mems = NULL;
6075
6076 for (bb = 0; bb < n_basic_blocks; bb++)
6077 {
6078 for (insn = BLOCK_HEAD (bb);
6079 insn && insn != NEXT_INSN (BLOCK_END (bb));
6080 insn = NEXT_INSN (insn))
6081 {
6082 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
6083 {
6084 if (GET_CODE (PATTERN (insn)) == SET)
6085 {
6086 rtx src = SET_SRC (PATTERN (insn));
6087 rtx dest = SET_DEST (PATTERN (insn));
6088
6089 /* Check for a simple LOAD... */
6090 if (GET_CODE (src) == MEM && simple_mem (src))
6091 {
6092 ptr = ldst_entry (src);
6093 if (GET_CODE (dest) == REG)
6094 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
6095 else
6096 ptr->invalid = 1;
6097 }
6098 else
6099 {
6100 /* Make sure there isn't a buried load somewhere. */
6101 invalidate_any_buried_refs (src);
6102 }
6103
6104 /* Check for stores. Don't worry about aliased ones, they
6105 will block any movement we might do later. We only care
6106 about this exact pattern since those are the only
6107 circumstance that we will ignore the aliasing info. */
6108 if (GET_CODE (dest) == MEM && simple_mem (dest))
6109 {
6110 ptr = ldst_entry (dest);
6111
6112 if (GET_CODE (src) != MEM
6113 && GET_CODE (src) != ASM_OPERANDS)
6114 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
6115 else
6116 ptr->invalid = 1;
6117 }
6118 }
6119 else
6120 invalidate_any_buried_refs (PATTERN (insn));
6121 }
6122 }
6123 }
6124 }
6125
6126 /* Remove any references that have been either invalidated or are not in the
6127 expression list for pre gcse. */
6128
6129 static void
6130 trim_ld_motion_mems ()
6131 {
6132 struct ls_expr * last = NULL;
6133 struct ls_expr * ptr = first_ls_expr ();
6134
6135 while (ptr != NULL)
6136 {
6137 int del = ptr->invalid;
6138 struct expr * expr = NULL;
6139
6140 /* Delete if entry has been made invalid. */
6141 if (!del)
6142 {
6143 unsigned int i;
6144
6145 del = 1;
6146 /* Delete if we cannot find this mem in the expression list. */
6147 for (i = 0; i < expr_hash_table_size && del; i++)
6148 {
6149 for (expr = expr_hash_table[i];
6150 expr != NULL;
6151 expr = expr->next_same_hash)
6152 if (expr_equiv_p (expr->expr, ptr->pattern))
6153 {
6154 del = 0;
6155 break;
6156 }
6157 }
6158 }
6159
6160 if (del)
6161 {
6162 if (last != NULL)
6163 {
6164 last->next = ptr->next;
6165 free_ldst_entry (ptr);
6166 ptr = last->next;
6167 }
6168 else
6169 {
6170 pre_ldst_mems = pre_ldst_mems->next;
6171 free_ldst_entry (ptr);
6172 ptr = pre_ldst_mems;
6173 }
6174 }
6175 else
6176 {
6177 /* Set the expression field if we are keeping it. */
6178 last = ptr;
6179 ptr->expr = expr;
6180 ptr = ptr->next;
6181 }
6182 }
6183
6184 /* Show the world what we've found. */
6185 if (gcse_file && pre_ldst_mems != NULL)
6186 print_ldst_list (gcse_file);
6187 }
6188
6189 /* This routine will take an expression which we are replacing with
6190 a reaching register, and update any stores that are needed if
6191 that expression is in the ld_motion list. Stores are updated by
6192 copying their SRC to the reaching register, and then storeing
6193 the reaching register into the store location. These keeps the
6194 correct value in the reaching register for the loads. */
6195
6196 static void
6197 update_ld_motion_stores (expr)
6198 struct expr * expr;
6199 {
6200 struct ls_expr * mem_ptr;
6201
6202 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
6203 {
6204 /* We can try to find just the REACHED stores, but is shouldn't
6205 matter to set the reaching reg everywhere... some might be
6206 dead and should be eliminated later. */
6207
6208 /* We replace SET mem = expr with
6209 SET reg = expr
6210 SET mem = reg , where reg is the
6211 reaching reg used in the load. */
6212 rtx list = mem_ptr->stores;
6213
6214 for ( ; list != NULL_RTX; list = XEXP (list, 1))
6215 {
6216 rtx insn = XEXP (list, 0);
6217 rtx pat = PATTERN (insn);
6218 rtx src = SET_SRC (pat);
6219 rtx reg = expr->reaching_reg;
6220 rtx copy, new;
6221
6222 /* If we've already copied it, continue. */
6223 if (expr->reaching_reg == src)
6224 continue;
6225
6226 if (gcse_file)
6227 {
6228 fprintf (gcse_file, "PRE: store updated with reaching reg ");
6229 print_rtl (gcse_file, expr->reaching_reg);
6230 fprintf (gcse_file, ":\n ");
6231 print_inline_rtx (gcse_file, insn, 8);
6232 fprintf (gcse_file, "\n");
6233 }
6234
6235 copy = gen_move_insn ( reg, SET_SRC (pat));
6236 new = emit_insn_before (copy, insn);
6237 record_one_set (REGNO (reg), new);
6238 SET_SRC (pat) = reg;
6239
6240 /* un-recognize this pattern since it's probably different now. */
6241 INSN_CODE (insn) = -1;
6242 gcse_create_count++;
6243 }
6244 }
6245 }
6246 \f
6247 /* Store motion code. */
6248
6249 /* This is used to communicate the target bitvector we want to use in the
6250 reg_set_info routine when called via the note_stores mechanism. */
6251 static sbitmap * regvec;
6252
6253 /* Used in computing the reverse edge graph bit vectors. */
6254 static sbitmap * st_antloc;
6255
6256 /* Global holding the number of store expressions we are dealing with. */
6257 static int num_stores;
6258
6259 /* Checks to set if we need to mark a register set. Called from note_stores. */
6260
6261 static void
6262 reg_set_info (dest, setter, data)
6263 rtx dest, setter ATTRIBUTE_UNUSED;
6264 void * data ATTRIBUTE_UNUSED;
6265 {
6266 if (GET_CODE (dest) == SUBREG)
6267 dest = SUBREG_REG (dest);
6268
6269 if (GET_CODE (dest) == REG)
6270 SET_BIT (*regvec, REGNO (dest));
6271 }
6272
6273 /* Return non-zero if the register operands of expression X are killed
6274 anywhere in basic block BB. */
6275
6276 static int
6277 store_ops_ok (x, bb)
6278 rtx x;
6279 basic_block bb;
6280 {
6281 int i;
6282 enum rtx_code code;
6283 const char * fmt;
6284
6285 /* Repeat is used to turn tail-recursion into iteration. */
6286 repeat:
6287
6288 if (x == 0)
6289 return 1;
6290
6291 code = GET_CODE (x);
6292 switch (code)
6293 {
6294 case REG:
6295 /* If a reg has changed after us in this
6296 block, the operand has been killed. */
6297 return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
6298
6299 case MEM:
6300 x = XEXP (x, 0);
6301 goto repeat;
6302
6303 case PRE_DEC:
6304 case PRE_INC:
6305 case POST_DEC:
6306 case POST_INC:
6307 return 0;
6308
6309 case PC:
6310 case CC0: /*FIXME*/
6311 case CONST:
6312 case CONST_INT:
6313 case CONST_DOUBLE:
6314 case SYMBOL_REF:
6315 case LABEL_REF:
6316 case ADDR_VEC:
6317 case ADDR_DIFF_VEC:
6318 return 1;
6319
6320 default:
6321 break;
6322 }
6323
6324 i = GET_RTX_LENGTH (code) - 1;
6325 fmt = GET_RTX_FORMAT (code);
6326
6327 for (; i >= 0; i--)
6328 {
6329 if (fmt[i] == 'e')
6330 {
6331 rtx tem = XEXP (x, i);
6332
6333 /* If we are about to do the last recursive call
6334 needed at this level, change it into iteration.
6335 This function is called enough to be worth it. */
6336 if (i == 0)
6337 {
6338 x = tem;
6339 goto repeat;
6340 }
6341
6342 if (! store_ops_ok (tem, bb))
6343 return 0;
6344 }
6345 else if (fmt[i] == 'E')
6346 {
6347 int j;
6348
6349 for (j = 0; j < XVECLEN (x, i); j++)
6350 {
6351 if (! store_ops_ok (XVECEXP (x, i, j), bb))
6352 return 0;
6353 }
6354 }
6355 }
6356
6357 return 1;
6358 }
6359
6360 /* Determine whether insn is MEM store pattern that we will consider moving. */
6361
6362 static void
6363 find_moveable_store (insn)
6364 rtx insn;
6365 {
6366 struct ls_expr * ptr;
6367 rtx dest = PATTERN (insn);
6368
6369 if (GET_CODE (dest) != SET
6370 || GET_CODE (SET_SRC (dest)) == ASM_OPERANDS)
6371 return;
6372
6373 dest = SET_DEST (dest);
6374
6375 if (GET_CODE (dest) != MEM || MEM_VOLATILE_P (dest)
6376 || GET_MODE (dest) == BLKmode)
6377 return;
6378
6379 if (GET_CODE (XEXP (dest, 0)) != SYMBOL_REF)
6380 return;
6381
6382 if (rtx_varies_p (XEXP (dest, 0), 0))
6383 return;
6384
6385 ptr = ldst_entry (dest);
6386 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
6387 }
6388
6389 /* Perform store motion. Much like gcse, except we move expressions the
6390 other way by looking at the flowgraph in reverse. */
6391
6392 static int
6393 compute_store_table ()
6394 {
6395 int bb, ret;
6396 unsigned regno;
6397 rtx insn, pat;
6398
6399 max_gcse_regno = max_reg_num ();
6400
6401 reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
6402 max_gcse_regno);
6403 sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
6404 pre_ldst_mems = 0;
6405
6406 /* Find all the stores we care about. */
6407 for (bb = 0; bb < n_basic_blocks; bb++)
6408 {
6409 regvec = & (reg_set_in_block[bb]);
6410 for (insn = BLOCK_END (bb);
6411 insn && insn != PREV_INSN (BLOCK_HEAD (bb));
6412 insn = PREV_INSN (insn))
6413 {
6414 /* Ignore anything that is not a normal insn. */
6415 if (! INSN_P (insn))
6416 continue;
6417
6418 if (GET_CODE (insn) == CALL_INSN)
6419 {
6420 bool clobbers_all = false;
6421 #ifdef NON_SAVING_SETJMP
6422 if (NON_SAVING_SETJMP
6423 && find_reg_note (insn, REG_SETJMP, NULL_RTX))
6424 clobbers_all = true;
6425 #endif
6426
6427 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6428 if (clobbers_all
6429 || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
6430 SET_BIT (reg_set_in_block[bb], regno);
6431 }
6432
6433 pat = PATTERN (insn);
6434 note_stores (pat, reg_set_info, NULL);
6435
6436 /* Now that we've marked regs, look for stores. */
6437 if (GET_CODE (pat) == SET)
6438 find_moveable_store (insn);
6439 }
6440 }
6441
6442 ret = enumerate_ldsts ();
6443
6444 if (gcse_file)
6445 {
6446 fprintf (gcse_file, "Store Motion Expressions.\n");
6447 print_ldst_list (gcse_file);
6448 }
6449
6450 return ret;
6451 }
6452
6453 /* Check to see if the load X is aliased with STORE_PATTERN. */
6454
6455 static int
6456 load_kills_store (x, store_pattern)
6457 rtx x, store_pattern;
6458 {
6459 if (true_dependence (x, GET_MODE (x), store_pattern, rtx_addr_varies_p))
6460 return 1;
6461 return 0;
6462 }
6463
6464 /* Go through the entire insn X, looking for any loads which might alias
6465 STORE_PATTERN. Return 1 if found. */
6466
6467 static int
6468 find_loads (x, store_pattern)
6469 rtx x, store_pattern;
6470 {
6471 const char * fmt;
6472 int i,j;
6473 int ret = 0;
6474
6475 if (!x)
6476 return 0;
6477
6478 if (GET_CODE (x) == SET)
6479 x = SET_SRC (x);
6480
6481 if (GET_CODE (x) == MEM)
6482 {
6483 if (load_kills_store (x, store_pattern))
6484 return 1;
6485 }
6486
6487 /* Recursively process the insn. */
6488 fmt = GET_RTX_FORMAT (GET_CODE (x));
6489
6490 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0 && !ret; i--)
6491 {
6492 if (fmt[i] == 'e')
6493 ret |= find_loads (XEXP (x, i), store_pattern);
6494 else if (fmt[i] == 'E')
6495 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6496 ret |= find_loads (XVECEXP (x, i, j), store_pattern);
6497 }
6498 return ret;
6499 }
6500
6501 /* Check if INSN kills the store pattern X (is aliased with it).
6502 Return 1 if it it does. */
6503
6504 static int
6505 store_killed_in_insn (x, insn)
6506 rtx x, insn;
6507 {
6508 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
6509 return 0;
6510
6511 if (GET_CODE (insn) == CALL_INSN)
6512 {
6513 if (CONST_OR_PURE_CALL_P (insn))
6514 return 0;
6515 else
6516 return 1;
6517 }
6518
6519 if (GET_CODE (PATTERN (insn)) == SET)
6520 {
6521 rtx pat = PATTERN (insn);
6522 /* Check for memory stores to aliased objects. */
6523 if (GET_CODE (SET_DEST (pat)) == MEM && !expr_equiv_p (SET_DEST (pat), x))
6524 /* pretend its a load and check for aliasing. */
6525 if (find_loads (SET_DEST (pat), x))
6526 return 1;
6527 return find_loads (SET_SRC (pat), x);
6528 }
6529 else
6530 return find_loads (PATTERN (insn), x);
6531 }
6532
6533 /* Returns 1 if the expression X is loaded or clobbered on or after INSN
6534 within basic block BB. */
6535
6536 static int
6537 store_killed_after (x, insn, bb)
6538 rtx x, insn;
6539 basic_block bb;
6540 {
6541 rtx last = bb->end;
6542
6543 if (insn == last)
6544 return 0;
6545
6546 /* Check if the register operands of the store are OK in this block.
6547 Note that if registers are changed ANYWHERE in the block, we'll
6548 decide we can't move it, regardless of whether it changed above
6549 or below the store. This could be improved by checking the register
6550 operands while lookinng for aliasing in each insn. */
6551 if (!store_ops_ok (XEXP (x, 0), bb))
6552 return 1;
6553
6554 for ( ; insn && insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
6555 if (store_killed_in_insn (x, insn))
6556 return 1;
6557
6558 return 0;
6559 }
6560
6561 /* Returns 1 if the expression X is loaded or clobbered on or before INSN
6562 within basic block BB. */
6563 static int
6564 store_killed_before (x, insn, bb)
6565 rtx x, insn;
6566 basic_block bb;
6567 {
6568 rtx first = bb->head;
6569
6570 if (insn == first)
6571 return store_killed_in_insn (x, insn);
6572
6573 /* Check if the register operands of the store are OK in this block.
6574 Note that if registers are changed ANYWHERE in the block, we'll
6575 decide we can't move it, regardless of whether it changed above
6576 or below the store. This could be improved by checking the register
6577 operands while lookinng for aliasing in each insn. */
6578 if (!store_ops_ok (XEXP (x, 0), bb))
6579 return 1;
6580
6581 for ( ; insn && insn != PREV_INSN (first); insn = PREV_INSN (insn))
6582 if (store_killed_in_insn (x, insn))
6583 return 1;
6584
6585 return 0;
6586 }
6587
6588 #define ANTIC_STORE_LIST(x) ((x)->loads)
6589 #define AVAIL_STORE_LIST(x) ((x)->stores)
6590
6591 /* Given the table of available store insns at the end of blocks,
6592 determine which ones are not killed by aliasing, and generate
6593 the appropriate vectors for gen and killed. */
6594 static void
6595 build_store_vectors ()
6596 {
6597 basic_block bb;
6598 int b;
6599 rtx insn, st;
6600 struct ls_expr * ptr;
6601
6602 /* Build the gen_vector. This is any store in the table which is not killed
6603 by aliasing later in its block. */
6604 ae_gen = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
6605 sbitmap_vector_zero (ae_gen, n_basic_blocks);
6606
6607 st_antloc = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
6608 sbitmap_vector_zero (st_antloc, n_basic_blocks);
6609
6610 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6611 {
6612 /* Put all the stores into either the antic list, or the avail list,
6613 or both. */
6614 rtx store_list = ptr->stores;
6615 ptr->stores = NULL_RTX;
6616
6617 for (st = store_list; st != NULL; st = XEXP (st, 1))
6618 {
6619 insn = XEXP (st, 0);
6620 bb = BLOCK_FOR_INSN (insn);
6621
6622 if (!store_killed_after (ptr->pattern, insn, bb))
6623 {
6624 /* If we've already seen an availale expression in this block,
6625 we can delete the one we saw already (It occurs earlier in
6626 the block), and replace it with this one). We'll copy the
6627 old SRC expression to an unused register in case there
6628 are any side effects. */
6629 if (TEST_BIT (ae_gen[bb->index], ptr->index))
6630 {
6631 /* Find previous store. */
6632 rtx st;
6633 for (st = AVAIL_STORE_LIST (ptr); st ; st = XEXP (st, 1))
6634 if (BLOCK_FOR_INSN (XEXP (st, 0)) == bb)
6635 break;
6636 if (st)
6637 {
6638 rtx r = gen_reg_rtx (GET_MODE (ptr->pattern));
6639 if (gcse_file)
6640 fprintf(gcse_file, "Removing redundant store:\n");
6641 replace_store_insn (r, XEXP (st, 0), bb);
6642 XEXP (st, 0) = insn;
6643 continue;
6644 }
6645 }
6646 SET_BIT (ae_gen[bb->index], ptr->index);
6647 AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
6648 AVAIL_STORE_LIST (ptr));
6649 }
6650
6651 if (!store_killed_before (ptr->pattern, insn, bb))
6652 {
6653 SET_BIT (st_antloc[BLOCK_NUM (insn)], ptr->index);
6654 ANTIC_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
6655 ANTIC_STORE_LIST (ptr));
6656 }
6657 }
6658
6659 /* Free the original list of store insns. */
6660 free_INSN_LIST_list (&store_list);
6661 }
6662
6663 ae_kill = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
6664 sbitmap_vector_zero (ae_kill, n_basic_blocks);
6665
6666 transp = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
6667 sbitmap_vector_zero (transp, n_basic_blocks);
6668
6669 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6670 for (b = 0; b < n_basic_blocks; b++)
6671 {
6672 if (store_killed_after (ptr->pattern, BLOCK_HEAD (b), BASIC_BLOCK (b)))
6673 {
6674 /* The anticipatable expression is not killed if it's gen'd. */
6675 /*
6676 We leave this check out for now. If we have a code sequence
6677 in a block which looks like:
6678 ST MEMa = x
6679 L y = MEMa
6680 ST MEMa = z
6681 We should flag this as having an ANTIC expression, NOT
6682 transparent, NOT killed, and AVAIL.
6683 Unfortunately, since we haven't re-written all loads to
6684 use the reaching reg, we'll end up doing an incorrect
6685 Load in the middle here if we push the store down. It happens in
6686 gcc.c-torture/execute/960311-1.c with -O3
6687 If we always kill it in this case, we'll sometimes do
6688 uneccessary work, but it shouldn't actually hurt anything.
6689 if (!TEST_BIT (ae_gen[b], ptr->index)). */
6690 SET_BIT (ae_kill[b], ptr->index);
6691 }
6692 else
6693 SET_BIT (transp[b], ptr->index);
6694 }
6695
6696 /* Any block with no exits calls some non-returning function, so
6697 we better mark the store killed here, or we might not store to
6698 it at all. If we knew it was abort, we wouldn't have to store,
6699 but we don't know that for sure. */
6700 if (gcse_file)
6701 {
6702 fprintf (gcse_file, "ST_avail and ST_antic (shown under loads..)\n");
6703 print_ldst_list (gcse_file);
6704 dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, n_basic_blocks);
6705 dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, n_basic_blocks);
6706 dump_sbitmap_vector (gcse_file, "Transpt", "", transp, n_basic_blocks);
6707 dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, n_basic_blocks);
6708 }
6709 }
6710
6711 /* Insert an instruction at the begining of a basic block, and update
6712 the BLOCK_HEAD if needed. */
6713
6714 static void
6715 insert_insn_start_bb (insn, bb)
6716 rtx insn;
6717 basic_block bb;
6718 {
6719 /* Insert at start of successor block. */
6720 rtx prev = PREV_INSN (bb->head);
6721 rtx before = bb->head;
6722 while (before != 0)
6723 {
6724 if (GET_CODE (before) != CODE_LABEL
6725 && (GET_CODE (before) != NOTE
6726 || NOTE_LINE_NUMBER (before) != NOTE_INSN_BASIC_BLOCK))
6727 break;
6728 prev = before;
6729 if (prev == bb->end)
6730 break;
6731 before = NEXT_INSN (before);
6732 }
6733
6734 insn = emit_insn_after (insn, prev);
6735
6736 if (gcse_file)
6737 {
6738 fprintf (gcse_file, "STORE_MOTION insert store at start of BB %d:\n",
6739 bb->index);
6740 print_inline_rtx (gcse_file, insn, 6);
6741 fprintf (gcse_file, "\n");
6742 }
6743 }
6744
6745 /* This routine will insert a store on an edge. EXPR is the ldst entry for
6746 the memory reference, and E is the edge to insert it on. Returns non-zero
6747 if an edge insertion was performed. */
6748
6749 static int
6750 insert_store (expr, e)
6751 struct ls_expr * expr;
6752 edge e;
6753 {
6754 rtx reg, insn;
6755 basic_block bb;
6756 edge tmp;
6757
6758 /* We did all the deleted before this insert, so if we didn't delete a
6759 store, then we haven't set the reaching reg yet either. */
6760 if (expr->reaching_reg == NULL_RTX)
6761 return 0;
6762
6763 reg = expr->reaching_reg;
6764 insn = gen_move_insn (expr->pattern, reg);
6765
6766 /* If we are inserting this expression on ALL predecessor edges of a BB,
6767 insert it at the start of the BB, and reset the insert bits on the other
6768 edges so we don't try to insert it on the other edges. */
6769 bb = e->dest;
6770 for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
6771 {
6772 int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
6773 if (index == EDGE_INDEX_NO_EDGE)
6774 abort ();
6775 if (! TEST_BIT (pre_insert_map[index], expr->index))
6776 break;
6777 }
6778
6779 /* If tmp is NULL, we found an insertion on every edge, blank the
6780 insertion vector for these edges, and insert at the start of the BB. */
6781 if (!tmp && bb != EXIT_BLOCK_PTR)
6782 {
6783 for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
6784 {
6785 int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
6786 RESET_BIT (pre_insert_map[index], expr->index);
6787 }
6788 insert_insn_start_bb (insn, bb);
6789 return 0;
6790 }
6791
6792 /* We can't insert on this edge, so we'll insert at the head of the
6793 successors block. See Morgan, sec 10.5. */
6794 if ((e->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
6795 {
6796 insert_insn_start_bb (insn, bb);
6797 return 0;
6798 }
6799
6800 insert_insn_on_edge (insn, e);
6801
6802 if (gcse_file)
6803 {
6804 fprintf (gcse_file, "STORE_MOTION insert insn on edge (%d, %d):\n",
6805 e->src->index, e->dest->index);
6806 print_inline_rtx (gcse_file, insn, 6);
6807 fprintf (gcse_file, "\n");
6808 }
6809
6810 return 1;
6811 }
6812
6813 /* This routine will replace a store with a SET to a specified register. */
6814
6815 static void
6816 replace_store_insn (reg, del, bb)
6817 rtx reg, del;
6818 basic_block bb;
6819 {
6820 rtx insn;
6821
6822 insn = gen_move_insn (reg, SET_SRC (PATTERN (del)));
6823 insn = emit_insn_after (insn, del);
6824
6825 if (gcse_file)
6826 {
6827 fprintf (gcse_file,
6828 "STORE_MOTION delete insn in BB %d:\n ", bb->index);
6829 print_inline_rtx (gcse_file, del, 6);
6830 fprintf(gcse_file, "\nSTORE MOTION replaced with insn:\n ");
6831 print_inline_rtx (gcse_file, insn, 6);
6832 fprintf(gcse_file, "\n");
6833 }
6834
6835 delete_insn (del);
6836 }
6837
6838
6839 /* Delete a store, but copy the value that would have been stored into
6840 the reaching_reg for later storing. */
6841
6842 static void
6843 delete_store (expr, bb)
6844 struct ls_expr * expr;
6845 basic_block bb;
6846 {
6847 rtx reg, i, del;
6848
6849 if (expr->reaching_reg == NULL_RTX)
6850 expr->reaching_reg = gen_reg_rtx (GET_MODE (expr->pattern));
6851
6852
6853 /* If there is more than 1 store, the earlier ones will be dead,
6854 but it doesn't hurt to replace them here. */
6855 reg = expr->reaching_reg;
6856
6857 for (i = AVAIL_STORE_LIST (expr); i; i = XEXP (i, 1))
6858 {
6859 del = XEXP (i, 0);
6860 if (BLOCK_FOR_INSN (del) == bb)
6861 {
6862 /* We know there is only one since we deleted redundant
6863 ones during the available computation. */
6864 replace_store_insn (reg, del, bb);
6865 break;
6866 }
6867 }
6868 }
6869
6870 /* Free memory used by store motion. */
6871
6872 static void
6873 free_store_memory ()
6874 {
6875 free_ldst_mems ();
6876
6877 if (ae_gen)
6878 sbitmap_vector_free (ae_gen);
6879 if (ae_kill)
6880 sbitmap_vector_free (ae_kill);
6881 if (transp)
6882 sbitmap_vector_free (transp);
6883 if (st_antloc)
6884 sbitmap_vector_free (st_antloc);
6885 if (pre_insert_map)
6886 sbitmap_vector_free (pre_insert_map);
6887 if (pre_delete_map)
6888 sbitmap_vector_free (pre_delete_map);
6889 if (reg_set_in_block)
6890 sbitmap_vector_free (reg_set_in_block);
6891
6892 ae_gen = ae_kill = transp = st_antloc = NULL;
6893 pre_insert_map = pre_delete_map = reg_set_in_block = NULL;
6894 }
6895
6896 /* Perform store motion. Much like gcse, except we move expressions the
6897 other way by looking at the flowgraph in reverse. */
6898
6899 static void
6900 store_motion ()
6901 {
6902 int x;
6903 struct ls_expr * ptr;
6904 int update_flow = 0;
6905
6906 if (gcse_file)
6907 {
6908 fprintf (gcse_file, "before store motion\n");
6909 print_rtl (gcse_file, get_insns ());
6910 }
6911
6912
6913 init_alias_analysis ();
6914
6915 /* Find all the stores that are live to the end of their block. */
6916 num_stores = compute_store_table ();
6917 if (num_stores == 0)
6918 {
6919 sbitmap_vector_free (reg_set_in_block);
6920 end_alias_analysis ();
6921 return;
6922 }
6923
6924 /* Now compute whats actually available to move. */
6925 add_noreturn_fake_exit_edges ();
6926 build_store_vectors ();
6927
6928 edge_list = pre_edge_rev_lcm (gcse_file, num_stores, transp, ae_gen,
6929 st_antloc, ae_kill, &pre_insert_map,
6930 &pre_delete_map);
6931
6932 /* Now we want to insert the new stores which are going to be needed. */
6933 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6934 {
6935 for (x = 0; x < n_basic_blocks; x++)
6936 if (TEST_BIT (pre_delete_map[x], ptr->index))
6937 delete_store (ptr, BASIC_BLOCK (x));
6938
6939 for (x = 0; x < NUM_EDGES (edge_list); x++)
6940 if (TEST_BIT (pre_insert_map[x], ptr->index))
6941 update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x));
6942 }
6943
6944 if (update_flow)
6945 commit_edge_insertions ();
6946
6947 free_store_memory ();
6948 free_edge_list (edge_list);
6949 remove_fake_edges ();
6950 end_alias_analysis ();
6951 }