1 /* Global common subexpression elimination/Partial redundancy elimination
2 and global constant/copy propagation for GNU compiler.
3 Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
21 02111-1307, USA. */
22
23 /* TODO
24 - reordering of memory allocation and freeing to be more space efficient
25 - do rough calc of how many regs are needed in each block, and a rough
26 calc of how many regs are available in each class and use that to
27 throttle back the code in cases where RTX_COST is minimal.
28 - a store to the same address as a load does not kill the load if the
29 source of the store is also the destination of the load. Handling this
30 allows more load motion, particularly out of loops.
31 - ability to realloc sbitmap vectors would allow one initial computation
32 of reg_set_in_block with only subsequent additions, rather than
33 recomputing it for each pass
34
35 */
36
37 /* References searched while implementing this.
38
39 Compilers Principles, Techniques and Tools
40 Aho, Sethi, Ullman
41 Addison-Wesley, 1988
42
43 Global Optimization by Suppression of Partial Redundancies
44 E. Morel, C. Renvoise
45 Communications of the ACM, Vol. 22, Num. 2, Feb. 1979
46
47 A Portable Machine-Independent Global Optimizer - Design and Measurements
48 Frederick Chow
49 Stanford Ph.D. thesis, Dec. 1983
50
51 A Fast Algorithm for Code Movement Optimization
52 D.M. Dhamdhere
53 SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
54
55 A Solution to a Problem with Morel and Renvoise's
56 Global Optimization by Suppression of Partial Redundancies
57 K-H Drechsler, M.P. Stadel
58 ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
59
60 Practical Adaptation of the Global Optimization
61 Algorithm of Morel and Renvoise
62 D.M. Dhamdhere
63 ACM TOPLAS, Vol. 13, Num. 2, Apr. 1991
64
65 Efficiently Computing Static Single Assignment Form and the Control
66 Dependence Graph
67 R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
68 ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
69
70 Lazy Code Motion
71 J. Knoop, O. Ruthing, B. Steffen
72 ACM SIGPLAN Notices, Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
73
74 What's In a Region? Or Computing Control Dependence Regions in Near-Linear
75 Time for Reducible Flow Control
76 Thomas Ball
77 ACM Letters on Programming Languages and Systems,
78 Vol. 2, Num. 1-4, Mar-Dec 1993
79
80 An Efficient Representation for Sparse Sets
81 Preston Briggs, Linda Torczon
82 ACM Letters on Programming Languages and Systems,
83 Vol. 2, Num. 1-4, Mar-Dec 1993
84
85 A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
86 K-H Drechsler, M.P. Stadel
87 ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
88
89 Partial Dead Code Elimination
90 J. Knoop, O. Ruthing, B. Steffen
91 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
92
93 Effective Partial Redundancy Elimination
94 P. Briggs, K.D. Cooper
95 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
96
97 The Program Structure Tree: Computing Control Regions in Linear Time
98 R. Johnson, D. Pearson, K. Pingali
99 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
100
101 Optimal Code Motion: Theory and Practice
102 J. Knoop, O. Ruthing, B. Steffen
103 ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
104
105 The power of assignment motion
106 J. Knoop, O. Ruthing, B. Steffen
107 ACM SIGPLAN Notices, Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
108
109 Global code motion / global value numbering
110 C. Click
111 ACM SIGPLAN Notices, Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
112
113 Value Driven Redundancy Elimination
114 L.T. Simpson
115 Rice University Ph.D. thesis, Apr. 1996
116
117 Value Numbering
118 L.T. Simpson
119 Massively Scalar Compiler Project, Rice University, Sep. 1996
120
121 High Performance Compilers for Parallel Computing
122 Michael Wolfe
123 Addison-Wesley, 1996
124
125 Advanced Compiler Design and Implementation
126 Steven Muchnick
127 Morgan Kaufmann, 1997
128
129 Building an Optimizing Compiler
130 Robert Morgan
131 Digital Press, 1998
132
133 People wishing to speed up the code here should read:
134 Elimination Algorithms for Data Flow Analysis
135 B.G. Ryder, M.C. Paull
136 ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
137
138 How to Analyze Large Programs Efficiently and Informatively
139 D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
140 ACM SIGPLAN Notices, Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
141
142 People wishing to do something different can find various possibilities
143 in the above papers and elsewhere.
144 */
145
146 #include "config.h"
147 #include "system.h"
148 #include "toplev.h"
149
150 #include "rtl.h"
151 #include "tm_p.h"
152 #include "regs.h"
153 #include "hard-reg-set.h"
154 #include "flags.h"
155 #include "real.h"
156 #include "insn-config.h"
157 #include "recog.h"
158 #include "basic-block.h"
159 #include "output.h"
160 #include "function.h"
161 #include "expr.h"
162 #include "except.h"
163 #include "ggc.h"
164 #include "params.h"
165
166 #include "obstack.h"
167 #define obstack_chunk_alloc gmalloc
168 #define obstack_chunk_free free
169
170 /* Propagate flow information through back edges and thus enable PRE's
171 moving loop invariant calculations out of loops.
172
173 Originally this tended to create worse overall code, but several
174 improvements during the development of PRE seem to have made following
175 back edges generally a win.
176
177 Note that much of the loop invariant code motion done here would normally
178 be done by loop.c, which has more heuristics for when to move invariants
179 out of loops. At some point we might need to move some of those
180 heuristics into gcse.c. */
181 #define FOLLOW_BACK_EDGES 1
182
183 /* We support GCSE via Partial Redundancy Elimination. PRE optimizations
184 are a superset of those done by GCSE.
185
186 We perform the following steps:
187
188 1) Compute basic block information.
189
190 2) Compute table of places where registers are set.
191
192 3) Perform copy/constant propagation.
193
194 4) Perform global cse.
195
196 5) Perform another pass of copy/constant propagation.
197
198 Two passes of copy/constant propagation are done because the first one
199 enables more GCSE and the second one helps to clean up the copies that
200 GCSE creates. This is needed more for PRE than for Classic because Classic
201 GCSE will try to use an existing register containing the common
202 subexpression rather than create a new one. This is harder to do for PRE
203 because of the code motion (which Classic GCSE doesn't do).
204
205 Expressions we are interested in GCSE-ing are of the form
206 (set (pseudo-reg) (expression)).
207 Function want_to_gcse_p says what these are.
208
209 PRE handles moving invariant expressions out of loops (by treating them as
210 partially redundant).
211
212 Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
213 assignment) based GVN (global value numbering). L. T. Simpson's paper
214 (Rice University) on value numbering is a useful reference for this.
215
216 **********************
217
218 We used to support multiple passes but there are diminishing returns in
219 doing so. The first pass usually makes 90% of the changes that are doable.
220 A second pass can make a few more changes made possible by the first pass.
221 Experiments show any further passes don't make enough changes to justify
222 the expense.
223
224 A study of spec92 using an unlimited number of passes:
225 [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
226 [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
227 [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1
228
229 It was found that doing copy propagation between passes enables further
230 substitutions.
231
232 PRE is quite expensive in complicated functions because the DFA can take
233 a while to converge. Hence we only perform one pass. The parameter
234 max-gcse-passes can be modified if one wants to experiment.
235
236 **********************
237
238 The steps for PRE are:
239
240 1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
241
242 2) Perform the data flow analysis for PRE.
243
244 3) Delete the redundant instructions
245
246 4) Insert the required copies [if any] that make the partially
247 redundant instructions fully redundant.
248
249 5) For other reaching expressions, insert an instruction to copy the value
250 to a newly created pseudo that will reach the redundant instruction.
251
252 The deletion is done first so that when we do insertions we
253 know which pseudo reg to use.
254
255 Various papers have argued that PRE DFA is expensive (O(n^2)) and others
256 argue it is not. The number of iterations for the algorithm to converge
257 is typically 2-4 so I don't view it as that expensive (relatively speaking).
258
259 PRE GCSE depends heavily on the second CSE pass to clean up the copies
260 we create. To make an expression reach the place where it's redundant,
261 the result of the expression is copied to a new register, and the redundant
262 expression is deleted by replacing it with this new register. Classic GCSE
263 doesn't have this problem as much as it computes the reaching defs of
264 each register in each block and thus can try to use an existing register.
265
266 **********************
267
268 A fair bit of simplicity is gained by using small functions for simple
269 tasks, even when a function is only called in one place. This may
270 measurably slow things down [or may not] by adding more function call
271 overhead than is necessary. The source is laid out so that it's trivial
272 to make the affected functions inline, so that one can measure what
273 speedup, if any, is achieved; later, when things settle, things can
274 be rearranged.
275
276 Help stamp out big monolithic functions! */
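/* Illustrative sketch, not part of GCC: the overall shape of the driver
   described above.  Copy/constant propagation runs before each GCSE/PRE
   pass to expose more redundancies, and one final cprop pass (this time
   allowed to alter jumps) cleans up the copies GCSE created.  The
   sketch_* names are hypothetical stand-ins for the real entry points
   defined later in this file.  */
#if 0
static int
sketch_gcse_driver (max_passes)
     int max_passes;
{
  int pass = 0;
  int changed = 1;

  while (changed && pass < max_passes)
    {
      /* Jumps are left alone during the iterated passes
         (see gcse_main below).  */
      changed = sketch_cprop_pass (/* alter_jumps = */ 0);
      /* PRE, or classic GCSE when optimizing for size.  */
      changed |= sketch_gcse_pass ();
      pass++;
    }

  /* One last cprop pass, now allowed to modify jumps.  */
  sketch_cprop_pass (/* alter_jumps = */ 1);
  return pass;
}
#endif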
277 \f
278 /* GCSE global vars. */
279
280 /* -dG dump file. */
281 static FILE *gcse_file;
282
283 /* Note whether or not we should run jump optimization after gcse. We
284 want to do this for two cases.
285
286 * If we changed any jumps via cprop.
287
288 * If we added any labels via edge splitting. */
289
290 static int run_jump_opt_after_gcse;
291
292 /* Bitmaps are normally not included in debugging dumps.
293 However it's useful to be able to print them from GDB.
294 We could create special functions for this, but it's simpler to
295 just allow passing stderr to the dump_foo fns. Since stderr can
296 be a macro, we store a copy here. */
297 static FILE *debug_stderr;
298
299 /* An obstack for our working variables. */
300 static struct obstack gcse_obstack;
301
302 /* Non-zero for each mode that supports (set (reg) (reg)).
303 This is trivially true for integer and floating point values.
304 It may or may not be true for condition codes. */
305 static char can_copy_p[(int) NUM_MACHINE_MODES];
306
307 /* Non-zero if can_copy_p has been initialized. */
308 static int can_copy_init_p;
309
310 struct reg_use {rtx reg_rtx; };
311
312 /* Hash table of expressions. */
313
314 struct expr
315 {
316 /* The expression (SET_SRC for expressions, PATTERN for assignments). */
317 rtx expr;
318 /* Index in the available expression bitmaps. */
319 int bitmap_index;
320 /* Next entry with the same hash. */
321 struct expr *next_same_hash;
322 /* List of anticipatable occurrences in basic blocks in the function.
323 An "anticipatable occurrence" is one that is the first occurrence in the
324 basic block, the operands are not modified in the basic block prior
325 to the occurrence and the output is not used between the start of
326 the block and the occurrence. */
327 struct occr *antic_occr;
328 /* List of available occurrences in basic blocks in the function.
329 An "available occurrence" is one that is the last occurrence in the
330 basic block and the operands are not modified by following statements in
331 the basic block [including this insn]. */
332 struct occr *avail_occr;
333 /* Non-null if the computation is PRE redundant.
334 The value is the newly created pseudo-reg to record a copy of the
335 expression in all the places that reach the redundant copy. */
336 rtx reaching_reg;
337 };
338
339 /* Occurrence of an expression.
340 There is one per basic block. If a pattern appears more than once, the
341 last appearance is used [or the first for anticipatable expressions]. */
342
343 struct occr
344 {
345 /* Next occurrence of this expression. */
346 struct occr *next;
347 /* The insn that computes the expression. */
348 rtx insn;
349 /* Non-zero if this [anticipatable] occurrence has been deleted. */
350 char deleted_p;
351 /* Non-zero if this [available] occurrence has been copied to
352 reaching_reg. */
353 /* ??? This is mutually exclusive with deleted_p, so they could share
354 the same byte. */
355 char copied_p;
356 };
357
358 /* Expression and copy propagation hash tables.
359 Each hash table is an array of buckets.
360 ??? It is known that if it were an array of entries, structure elements
361 `next_same_hash' and `bitmap_index' wouldn't be necessary. However, it is
362 not clear whether in the final analysis a sufficient amount of memory would
363 be saved as the size of the available expression bitmaps would be larger
364 [one could build a mapping table without holes afterwards though].
365 Someday I'll perform the computation and figure it out. */
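/* Illustrative sketch, not part of GCC: how a lookup walks one bucket
   chain in a table laid out as above.  TABLE and SIZE are hypothetical
   parameters standing in for the globals declared just below; hash_expr
   and expr_equiv_p are the real helpers from this file.  */
#if 0
static struct expr *
sketch_lookup_expr (pat, table, size)
     rtx pat;
     struct expr **table;
     unsigned int size;
{
  int do_not_record;
  unsigned int slot = hash_expr (pat, GET_MODE (pat), &do_not_record, size);
  struct expr *cur;

  /* All entries whose hash collides at SLOT are chained through
     next_same_hash; compare each against PAT.  */
  for (cur = table[slot]; cur != NULL; cur = cur->next_same_hash)
    if (expr_equiv_p (cur->expr, pat))
      return cur;

  return NULL;
}
#endif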
366
367 /* Total size of the expression hash table, in elements. */
368 static unsigned int expr_hash_table_size;
369
370 /* The table itself.
371 This is an array of `expr_hash_table_size' elements. */
372 static struct expr **expr_hash_table;
373
374 /* Total size of the copy propagation hash table, in elements. */
375 static unsigned int set_hash_table_size;
376
377 /* The table itself.
378 This is an array of `set_hash_table_size' elements. */
379 static struct expr **set_hash_table;
380
381 /* Mapping of uids to cuids.
382 Only real insns get cuids. */
383 static int *uid_cuid;
384
385 /* Highest UID in UID_CUID. */
386 static int max_uid;
387
388 /* Get the cuid of an insn. */
389 #ifdef ENABLE_CHECKING
390 #define INSN_CUID(INSN) (INSN_UID (INSN) > max_uid ? (abort (), 0) : uid_cuid[INSN_UID (INSN)])
391 #else
392 #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
393 #endif
394
395 /* Number of cuids. */
396 static int max_cuid;
397
398 /* Mapping of cuids to insns. */
399 static rtx *cuid_insn;
400
401 /* Get insn from cuid. */
402 #define CUID_INSN(CUID) (cuid_insn[CUID])
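/* Illustrative sketch, not part of GCC: UIDs carry no ordering guarantee,
   but CUIDs increase monotonically with no gaps over real insns, so they
   give a cheap "comes earlier in the function" test, which the
   availability/anticipatability code below depends on.  */
#if 0
static int
sketch_insn_precedes_p (a, b)
     rtx a, b;
{
  return INSN_CUID (a) < INSN_CUID (b);
}
#endif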
403
404 /* Maximum register number in function prior to doing gcse + 1.
405 Registers created during this pass have regno >= max_gcse_regno.
406 This is named with "gcse" to avoid colliding with the global of the same name. */
407 static unsigned int max_gcse_regno;
408
409 /* Maximum number of cse-able expressions found. */
410 static int n_exprs;
411
412 /* Maximum number of assignments for copy propagation found. */
413 static int n_sets;
414
415 /* Table of registers that are modified.
416
417 For each register, each element is a list of places where the pseudo-reg
418 is set.
419
420 For simplicity, GCSE is done on sets of pseudo-regs only. PRE GCSE only
421 requires knowledge of which blocks kill which regs [and thus could use
422 a bitmap instead of the lists `reg_set_table' uses].
423
424 `reg_set_table' could be turned into an array of bitmaps (num-bbs x
425 num-regs) [however it may be useful to keep the data as is]. One
426 advantage of recording things this way is that `reg_set_table' is fairly
427 sparse with respect to pseudo regs but for hard regs could be fairly dense
428 [relatively speaking]. And recording sets of pseudo-regs in lists speeds
429 up functions like compute_transp since in the case of pseudo-regs we only
430 need to iterate over the number of times a pseudo-reg is set, not over the
431 number of basic blocks [clearly there is a bit of a slow down in the cases
432 where a pseudo is set more than once in a block, however it is believed
433 that the net effect is to speed things up]. This isn't done for hard-regs
434 because recording call-clobbered hard-regs in `reg_set_table' at each
435 function call can consume a fair bit of memory, and iterating over
436 hard-regs stored this way in compute_transp will be more expensive. */
437
438 typedef struct reg_set
439 {
440 /* The next setting of this register. */
441 struct reg_set *next;
442 /* The insn where it was set. */
443 rtx insn;
444 } reg_set;
445
446 static reg_set **reg_set_table;
447
448 /* Size of `reg_set_table'.
449 The table starts out at max_gcse_regno + slop, and is enlarged as
450 necessary. */
451 static int reg_set_table_size;
452
453 /* Amount to grow `reg_set_table' by when it's full. */
454 #define REG_SET_TABLE_SLOP 100
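/* Illustrative sketch, not part of GCC: answering "is pseudo REGNO set in
   block BB?" by walking its reg_set list, which is how compute_transp
   benefits from this layout -- the loop runs once per set of REGNO, not
   once per basic block.  */
#if 0
static int
sketch_reg_set_in_bb_p (regno, bb)
     int regno;
     basic_block bb;
{
  reg_set *r;

  for (r = reg_set_table[regno]; r != NULL; r = r->next)
    if (BLOCK_NUM (r->insn) == bb->index)
      return 1;

  return 0;
}
#endif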
455
456 /* This is a list of expressions which are MEMs and will be used by load
457 or store motion.
458 Load motion tracks MEMs which aren't killed by
459 anything except itself, i.e., loads and stores to a single location.
460 We can then allow movement of these MEM refs with a little special
461 allowance: all stores copy the same value to the reaching reg used
462 for the loads. This means all values used to store into memory must have
463 no side effects, so we can re-issue the setter value.
464 Store motion uses this structure as an expression table to track stores
465 which look interesting and might be moveable towards the exit block. */
466
467 struct ls_expr
468 {
469 struct expr * expr; /* Gcse expression reference for LM. */
470 rtx pattern; /* Pattern of this mem. */
471 rtx loads; /* INSN list of loads seen. */
472 rtx stores; /* INSN list of stores seen. */
473 struct ls_expr * next; /* Next in the list. */
474 int invalid; /* Invalid for some reason. */
475 int index; /* If it maps to a bitmap index. */
476 int hash_index; /* Index when in a hash table. */
477 rtx reaching_reg; /* Register to use when re-writing. */
478 };
479
480 /* Head of the list of load/store memory refs. */
481 static struct ls_expr * pre_ldst_mems = NULL;
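/* Illustrative sketch, not part of GCC: walking the list headed above to
   count the MEMs still eligible for load/store motion.  first_ls_expr and
   next_ls_expr below provide the same traversal for the real passes.  */
#if 0
static int
sketch_count_valid_ldst ()
{
  struct ls_expr *ptr;
  int count = 0;

  for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
    if (! ptr->invalid)
      count++;

  return count;
}
#endif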
482
483 /* Bitmap containing one bit for each register in the program.
484 Used when performing GCSE to track which registers have been set since
485 the start of the basic block. */
486 static regset reg_set_bitmap;
487
488 /* For each block, a bitmap of registers set in the block.
489 This is used by expr_killed_p and compute_transp.
490 It is computed during hash table computation and not by compute_sets
491 as it includes registers added since the last pass (or between cprop and
492 gcse) and it's currently not easy to realloc sbitmap vectors. */
493 static sbitmap *reg_set_in_block;
494
495 /* Array, indexed by basic block number, giving a list of insns which
496 modify memory within that block. */
497 static rtx * modify_mem_list;
498 bitmap modify_mem_list_set;
499
500 /* This array parallels modify_mem_list, but is kept canonicalized. */
501 static rtx * canon_modify_mem_list;
502 bitmap canon_modify_mem_list_set;
503 /* Various variables for statistics gathering. */
504
505 /* Memory used in a pass.
506 This isn't intended to be absolutely precise. Its intent is only
507 to keep an eye on memory usage. */
508 static int bytes_used;
509
510 /* GCSE substitutions made. */
511 static int gcse_subst_count;
512 /* Number of copy instructions created. */
513 static int gcse_create_count;
514 /* Number of constants propagated. */
515 static int const_prop_count;
516 /* Number of copies propagated. */
517 static int copy_prop_count;
518 \f
519 /* These variables are used by classic GCSE.
520 Normally they'd be defined a bit later, but `rd_gen' needs to
521 be declared sooner. */
522
523 /* Each block has a bitmap of each type.
524 The length of each block's bitmap is:
525
526 max_cuid - for reaching definitions
527 n_exprs - for available expressions
528
529 Thus we view the bitmaps as 2-dimensional arrays, i.e.
530 rd_kill[block_num][cuid_num]
531 ae_kill[block_num][expr_num] */
532
533 /* For reaching defs */
534 static sbitmap *rd_kill, *rd_gen, *reaching_defs, *rd_out;
535
536 /* for available exprs */
537 static sbitmap *ae_kill, *ae_gen, *ae_in, *ae_out;
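/* Illustrative sketch, not part of GCC: indexing the sbitmap vectors above
   as the two-dimensional arrays described in the comment, e.g. asking
   whether expression EXPR_INDEX is killed in block BB_INDEX.  */
#if 0
static int
sketch_expr_killed_in_bb_p (bb_index, expr_index)
     int bb_index, expr_index;
{
  /* First dimension selects the block, second the expression bit.  */
  return TEST_BIT (ae_kill[bb_index], expr_index) != 0;
}
#endif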
538
539 /* Objects of this type are passed around by the null-pointer check
540 removal routines. */
541 struct null_pointer_info
542 {
543 /* The basic block being processed. */
544 basic_block current_block;
545 /* The first register to be handled in this pass. */
546 unsigned int min_reg;
547 /* One greater than the last register to be handled in this pass. */
548 unsigned int max_reg;
549 sbitmap *nonnull_local;
550 sbitmap *nonnull_killed;
551 };
552 \f
553 static void compute_can_copy PARAMS ((void));
554 static char *gmalloc PARAMS ((unsigned int));
555 static char *grealloc PARAMS ((char *, unsigned int));
556 static char *gcse_alloc PARAMS ((unsigned long));
557 static void alloc_gcse_mem PARAMS ((rtx));
558 static void free_gcse_mem PARAMS ((void));
559 static void alloc_reg_set_mem PARAMS ((int));
560 static void free_reg_set_mem PARAMS ((void));
561 static int get_bitmap_width PARAMS ((int, int, int));
562 static void record_one_set PARAMS ((int, rtx));
563 static void record_set_info PARAMS ((rtx, rtx, void *));
564 static void compute_sets PARAMS ((rtx));
565 static void hash_scan_insn PARAMS ((rtx, int, int));
566 static void hash_scan_set PARAMS ((rtx, rtx, int));
567 static void hash_scan_clobber PARAMS ((rtx, rtx));
568 static void hash_scan_call PARAMS ((rtx, rtx));
569 static int want_to_gcse_p PARAMS ((rtx));
570 static int oprs_unchanged_p PARAMS ((rtx, rtx, int));
571 static int oprs_anticipatable_p PARAMS ((rtx, rtx));
572 static int oprs_available_p PARAMS ((rtx, rtx));
573 static void insert_expr_in_table PARAMS ((rtx, enum machine_mode, rtx,
574 int, int));
575 static void insert_set_in_table PARAMS ((rtx, rtx));
576 static unsigned int hash_expr PARAMS ((rtx, enum machine_mode, int *, int));
577 static unsigned int hash_expr_1 PARAMS ((rtx, enum machine_mode, int *));
578 static unsigned int hash_string_1 PARAMS ((const char *));
579 static unsigned int hash_set PARAMS ((int, int));
580 static int expr_equiv_p PARAMS ((rtx, rtx));
581 static void record_last_reg_set_info PARAMS ((rtx, int));
582 static void record_last_mem_set_info PARAMS ((rtx));
583 static void record_last_set_info PARAMS ((rtx, rtx, void *));
584 static void compute_hash_table PARAMS ((int));
585 static void alloc_set_hash_table PARAMS ((int));
586 static void free_set_hash_table PARAMS ((void));
587 static void compute_set_hash_table PARAMS ((void));
588 static void alloc_expr_hash_table PARAMS ((unsigned int));
589 static void free_expr_hash_table PARAMS ((void));
590 static void compute_expr_hash_table PARAMS ((void));
591 static void dump_hash_table PARAMS ((FILE *, const char *, struct expr **,
592 int, int));
593 static struct expr *lookup_expr PARAMS ((rtx));
594 static struct expr *lookup_set PARAMS ((unsigned int, rtx));
595 static struct expr *next_set PARAMS ((unsigned int, struct expr *));
596 static void reset_opr_set_tables PARAMS ((void));
597 static int oprs_not_set_p PARAMS ((rtx, rtx));
598 static void mark_call PARAMS ((rtx));
599 static void mark_set PARAMS ((rtx, rtx));
600 static void mark_clobber PARAMS ((rtx, rtx));
601 static void mark_oprs_set PARAMS ((rtx));
602 static void alloc_cprop_mem PARAMS ((int, int));
603 static void free_cprop_mem PARAMS ((void));
604 static void compute_transp PARAMS ((rtx, int, sbitmap *, int));
605 static void compute_transpout PARAMS ((void));
606 static void compute_local_properties PARAMS ((sbitmap *, sbitmap *, sbitmap *,
607 int));
608 static void compute_cprop_data PARAMS ((void));
609 static void find_used_regs PARAMS ((rtx *, void *));
610 static int try_replace_reg PARAMS ((rtx, rtx, rtx));
611 static struct expr *find_avail_set PARAMS ((int, rtx));
612 static int cprop_jump PARAMS ((basic_block, rtx, rtx, rtx, rtx));
613 static void mems_conflict_for_gcse_p PARAMS ((rtx, rtx, void *));
614 static int load_killed_in_block_p PARAMS ((basic_block, int, rtx, int));
615 static void canon_list_insert PARAMS ((rtx, rtx, void *));
616 static int cprop_insn PARAMS ((basic_block, rtx, int));
617 static int cprop PARAMS ((int));
618 static int one_cprop_pass PARAMS ((int, int));
619 static struct expr *find_bypass_set PARAMS ((int, int));
620 static int bypass_block PARAMS ((basic_block, rtx, rtx));
621 static int bypass_conditional_jumps PARAMS ((void));
622 static void alloc_pre_mem PARAMS ((int, int));
623 static void free_pre_mem PARAMS ((void));
624 static void compute_pre_data PARAMS ((void));
625 static int pre_expr_reaches_here_p PARAMS ((basic_block, struct expr *,
626 basic_block));
627 static void insert_insn_end_bb PARAMS ((struct expr *, basic_block, int));
628 static void pre_insert_copy_insn PARAMS ((struct expr *, rtx));
629 static void pre_insert_copies PARAMS ((void));
630 static int pre_delete PARAMS ((void));
631 static int pre_gcse PARAMS ((void));
632 static int one_pre_gcse_pass PARAMS ((int));
633 static void add_label_notes PARAMS ((rtx, rtx));
634 static void alloc_code_hoist_mem PARAMS ((int, int));
635 static void free_code_hoist_mem PARAMS ((void));
636 static void compute_code_hoist_vbeinout PARAMS ((void));
637 static void compute_code_hoist_data PARAMS ((void));
638 static int hoist_expr_reaches_here_p PARAMS ((basic_block, int, basic_block,
639 char *));
640 static void hoist_code PARAMS ((void));
641 static int one_code_hoisting_pass PARAMS ((void));
642 static void alloc_rd_mem PARAMS ((int, int));
643 static void free_rd_mem PARAMS ((void));
644 static void handle_rd_kill_set PARAMS ((rtx, int, basic_block));
645 static void compute_kill_rd PARAMS ((void));
646 static void compute_rd PARAMS ((void));
647 static void alloc_avail_expr_mem PARAMS ((int, int));
648 static void free_avail_expr_mem PARAMS ((void));
649 static void compute_ae_gen PARAMS ((void));
650 static int expr_killed_p PARAMS ((rtx, basic_block));
651 static void compute_ae_kill PARAMS ((sbitmap *, sbitmap *));
652 static int expr_reaches_here_p PARAMS ((struct occr *, struct expr *,
653 basic_block, int));
654 static rtx computing_insn PARAMS ((struct expr *, rtx));
655 static int def_reaches_here_p PARAMS ((rtx, rtx));
656 static int can_disregard_other_sets PARAMS ((struct reg_set **, rtx, int));
657 static int handle_avail_expr PARAMS ((rtx, struct expr *));
658 static int classic_gcse PARAMS ((void));
659 static int one_classic_gcse_pass PARAMS ((int));
660 static void invalidate_nonnull_info PARAMS ((rtx, rtx, void *));
661 static void delete_null_pointer_checks_1 PARAMS ((unsigned int *,
662 sbitmap *, sbitmap *,
663 struct null_pointer_info *));
664 static rtx process_insert_insn PARAMS ((struct expr *));
665 static int pre_edge_insert PARAMS ((struct edge_list *, struct expr **));
666 static int expr_reaches_here_p_work PARAMS ((struct occr *, struct expr *,
667 basic_block, int, char *));
668 static int pre_expr_reaches_here_p_work PARAMS ((basic_block, struct expr *,
669 basic_block, char *));
670 static struct ls_expr * ldst_entry PARAMS ((rtx));
671 static void free_ldst_entry PARAMS ((struct ls_expr *));
672 static void free_ldst_mems PARAMS ((void));
673 static void print_ldst_list PARAMS ((FILE *));
674 static struct ls_expr * find_rtx_in_ldst PARAMS ((rtx));
675 static int enumerate_ldsts PARAMS ((void));
676 static inline struct ls_expr * first_ls_expr PARAMS ((void));
677 static inline struct ls_expr * next_ls_expr PARAMS ((struct ls_expr *));
678 static int simple_mem PARAMS ((rtx));
679 static void invalidate_any_buried_refs PARAMS ((rtx));
680 static void compute_ld_motion_mems PARAMS ((void));
681 static void trim_ld_motion_mems PARAMS ((void));
682 static void update_ld_motion_stores PARAMS ((struct expr *));
683 static void reg_set_info PARAMS ((rtx, rtx, void *));
684 static int store_ops_ok PARAMS ((rtx, basic_block));
685 static void find_moveable_store PARAMS ((rtx));
686 static int compute_store_table PARAMS ((void));
687 static int load_kills_store PARAMS ((rtx, rtx));
688 static int find_loads PARAMS ((rtx, rtx));
689 static int store_killed_in_insn PARAMS ((rtx, rtx));
690 static int store_killed_after PARAMS ((rtx, rtx, basic_block));
691 static int store_killed_before PARAMS ((rtx, rtx, basic_block));
692 static void build_store_vectors PARAMS ((void));
693 static void insert_insn_start_bb PARAMS ((rtx, basic_block));
694 static int insert_store PARAMS ((struct ls_expr *, edge));
695 static void replace_store_insn PARAMS ((rtx, rtx, basic_block));
696 static void delete_store PARAMS ((struct ls_expr *,
697 basic_block));
698 static void free_store_memory PARAMS ((void));
699 static void store_motion PARAMS ((void));
700 static void free_insn_expr_list_list PARAMS ((rtx *));
701 static void clear_modify_mem_tables PARAMS ((void));
702 static void free_modify_mem_tables PARAMS ((void));
703 static rtx gcse_emit_move_after PARAMS ((rtx, rtx, rtx));
704 \f
705 /* Entry point for global common subexpression elimination.
706 F is the first instruction in the function. */
707
708 int
709 gcse_main (f, file)
710 rtx f;
711 FILE *file;
712 {
713 int changed, pass;
714 /* Bytes used at start of pass. */
715 int initial_bytes_used;
716 /* Maximum number of bytes used by a pass. */
717 int max_pass_bytes;
718 /* Point to release obstack data from for each pass. */
719 char *gcse_obstack_bottom;
720
721 /* Insertion of instructions on edges can create new basic blocks; we
722 need the original basic block count so that we can properly deallocate
723 arrays sized on the number of basic blocks originally in the cfg. */
724 int orig_bb_count;
725 /* We do not construct an accurate cfg in functions which call
726 setjmp, so just punt to be safe. */
727 if (current_function_calls_setjmp)
728 return 0;
729
730 /* Assume that we do not need to run jump optimizations after gcse. */
731 run_jump_opt_after_gcse = 0;
732
733 /* For calling dump_foo fns from gdb. */
734 debug_stderr = stderr;
735 gcse_file = file;
736
737 /* Identify the basic block information for this function, including
738 successors and predecessors. */
739 max_gcse_regno = max_reg_num ();
740
741 if (file)
742 dump_flow_info (file);
743
744 orig_bb_count = n_basic_blocks;
745 /* Return if there's nothing to do. */
746 if (n_basic_blocks <= 1)
747 return 0;
748
749 /* Trying to perform global optimizations on flow graphs which have
750 a high connectivity will take a long time and is unlikely to be
751 particularly useful.
752
753 In normal circumstances a cfg should have about twice as many edges
754 as blocks. But we do not want to punish small functions which have
755 a couple of switch statements. So we require a relatively large number
756 of basic blocks and the ratio of edges to blocks to be high. */
757 if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
758 {
759 if (warn_disabled_optimization)
760 warning ("GCSE disabled: %d > 1000 basic blocks and %d >= 20 edges/basic block",
761 n_basic_blocks, n_edges / n_basic_blocks);
762 return 0;
763 }
764
765 /* If allocating memory for the cprop bitmap would take up too much
766 storage it's better just to disable the optimization. */
767 if ((n_basic_blocks
768 * SBITMAP_SET_SIZE (max_gcse_regno)
769 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
770 {
771 if (warn_disabled_optimization)
772 warning ("GCSE disabled: %d basic blocks and %d registers",
773 n_basic_blocks, max_gcse_regno);
774
775 return 0;
776 }
777
778 /* See what modes support reg/reg copy operations. */
779 if (! can_copy_init_p)
780 {
781 compute_can_copy ();
782 can_copy_init_p = 1;
783 }
784
785 gcc_obstack_init (&gcse_obstack);
786 bytes_used = 0;
787
788 /* We need alias. */
789 init_alias_analysis ();
790 /* Record where pseudo-registers are set. This data is kept accurate
791 during each pass. ??? We could also record hard-reg information here
792 [since it's unchanging], however it is currently done during hash table
793 computation.
794
795 It may be tempting to compute MEM set information here too, but MEM sets
796 will be subject to code motion one day and thus we need to compute
797 information about memory sets when we build the hash tables. */
798
799 alloc_reg_set_mem (max_gcse_regno);
800 compute_sets (f);
801
802 pass = 0;
803 initial_bytes_used = bytes_used;
804 max_pass_bytes = 0;
805 gcse_obstack_bottom = gcse_alloc (1);
806 changed = 1;
807 while (changed && pass < MAX_GCSE_PASSES)
808 {
809 changed = 0;
810 if (file)
811 fprintf (file, "GCSE pass %d\n\n", pass + 1);
812
813 /* Initialize bytes_used to the space for the pred/succ lists,
814 and the reg_set_table data. */
815 bytes_used = initial_bytes_used;
816
817 /* Each pass may create new registers, so recalculate each time. */
818 max_gcse_regno = max_reg_num ();
819
820 alloc_gcse_mem (f);
821
822 /* Don't allow constant propagation to modify jumps
823 during this pass. */
824 changed = one_cprop_pass (pass + 1, 0);
825
826 if (optimize_size)
827 changed |= one_classic_gcse_pass (pass + 1);
828 else
829 {
830 changed |= one_pre_gcse_pass (pass + 1);
831 /* We may have just created new basic blocks. Release and
832 recompute various things which are sized on the number of
833 basic blocks. */
834 if (changed)
835 {
836 free_modify_mem_tables ();
837 modify_mem_list
838 = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
839 canon_modify_mem_list
840 = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
841 memset ((char *) modify_mem_list, 0, last_basic_block * sizeof (rtx));
842 memset ((char *) canon_modify_mem_list, 0, last_basic_block * sizeof (rtx));
843 orig_bb_count = n_basic_blocks;
844 }
845 free_reg_set_mem ();
846 alloc_reg_set_mem (max_reg_num ());
847 compute_sets (f);
848 run_jump_opt_after_gcse = 1;
849 }
850
851 if (max_pass_bytes < bytes_used)
852 max_pass_bytes = bytes_used;
853
854 /* Free up memory, then reallocate for code hoisting. We can
855 not re-use the existing allocated memory because the tables
856 will not have info for the insns or registers created by
857 partial redundancy elimination. */
858 free_gcse_mem ();
859
860 /* It does not make sense to run code hoisting unless we're optimizing
861 for code size -- it rarely makes programs faster, and can make
862 them bigger if we did partial redundancy elimination (when optimizing
863 for space, we use a classic gcse algorithm instead of partial
864 redundancy algorithms). */
865 if (optimize_size)
866 {
867 max_gcse_regno = max_reg_num ();
868 alloc_gcse_mem (f);
869 changed |= one_code_hoisting_pass ();
870 free_gcse_mem ();
871
872 if (max_pass_bytes < bytes_used)
873 max_pass_bytes = bytes_used;
874 }
875
876 if (file)
877 {
878 fprintf (file, "\n");
879 fflush (file);
880 }
881
882 obstack_free (&gcse_obstack, gcse_obstack_bottom);
883 pass++;
884 }
885
886 /* Do one last pass of copy propagation, including cprop into
887 conditional jumps. */
888
889 max_gcse_regno = max_reg_num ();
890 alloc_gcse_mem (f);
891 /* This time, go ahead and allow cprop to alter jumps. */
892 one_cprop_pass (pass + 1, 1);
893 free_gcse_mem ();
894
895 if (file)
896 {
897 fprintf (file, "GCSE of %s: %d basic blocks, ",
898 current_function_name, n_basic_blocks);
899 fprintf (file, "%d pass%s, %d bytes\n\n",
900 pass, pass > 1 ? "es" : "", max_pass_bytes);
901 }
902
903 obstack_free (&gcse_obstack, NULL);
904 free_reg_set_mem ();
905 /* We are finished with alias. */
906 end_alias_analysis ();
907 allocate_reg_info (max_reg_num (), FALSE, FALSE);
908
909 /* Store motion disabled until it is fixed. */
910 if (0 && !optimize_size && flag_gcse_sm)
911 store_motion ();
912 /* Record where pseudo-registers are set. */
913 return run_jump_opt_after_gcse;
914 }
915 \f
916 /* Misc. utilities. */
917
918 /* Compute which modes support reg/reg copy operations. */
919
920 static void
921 compute_can_copy ()
922 {
923 int i;
924 #ifndef AVOID_CCMODE_COPIES
925 rtx reg, insn;
926 #endif
927 memset (can_copy_p, 0, NUM_MACHINE_MODES);
928
929 start_sequence ();
930 for (i = 0; i < NUM_MACHINE_MODES; i++)
931 if (GET_MODE_CLASS (i) == MODE_CC)
932 {
933 #ifdef AVOID_CCMODE_COPIES
934 can_copy_p[i] = 0;
935 #else
936 reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
937 insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
938 if (recog (PATTERN (insn), insn, NULL) >= 0)
939 can_copy_p[i] = 1;
940 #endif
941 }
942 else
943 can_copy_p[i] = 1;
944
945 end_sequence ();
946 }
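/* Illustrative sketch, not part of GCC: how the table computed above is
   consulted.  Before GCSE records a copy of a value, the value's mode
   must be known to support a plain (set (reg) (reg)).  */
#if 0
static int
sketch_mode_copyable_p (mode)
     enum machine_mode mode;
{
  return can_copy_p[(int) mode];
}
#endif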
947 \f
948 /* Cover function to xmalloc to record bytes allocated. */
949
950 static char *
951 gmalloc (size)
952 unsigned int size;
953 {
954 bytes_used += size;
955 return xmalloc (size);
956 }
957
958 /* Cover function to xrealloc.
959 We don't record the additional size since we don't know it.
960 It won't affect memory usage stats much anyway. */
961
962 static char *
963 grealloc (ptr, size)
964 char *ptr;
965 unsigned int size;
966 {
967 return xrealloc (ptr, size);
968 }
969
970 /* Cover function to obstack_alloc.
971 We don't need to record the bytes allocated here since
972 obstack_chunk_alloc is set to gmalloc. */
973
974 static char *
975 gcse_alloc (size)
976 unsigned long size;
977 {
978 return (char *) obstack_alloc (&gcse_obstack, size);
979 }
980
981 /* Allocate memory for the cuid mapping array,
982 and reg/memory set tracking tables.
983
984 This is called at the start of each pass. */
985
986 static void
987 alloc_gcse_mem (f)
988 rtx f;
989 {
990 int i, n;
991 rtx insn;
992
993 /* Find the largest UID and create a mapping from UIDs to CUIDs.
994 CUIDs are like UIDs except they increase monotonically, have no gaps,
995 and only apply to real insns. */
996
997 max_uid = get_max_uid ();
998 n = (max_uid + 1) * sizeof (int);
999 uid_cuid = (int *) gmalloc (n);
1000 memset ((char *) uid_cuid, 0, n);
1001 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
1002 {
1003 if (INSN_P (insn))
1004 uid_cuid[INSN_UID (insn)] = i++;
1005 else
1006 uid_cuid[INSN_UID (insn)] = i;
1007 }
1008
1009 /* Create a table mapping cuids to insns. */
1010
1011 max_cuid = i;
1012 n = (max_cuid + 1) * sizeof (rtx);
1013 cuid_insn = (rtx *) gmalloc (n);
1014 memset ((char *) cuid_insn, 0, n);
1015 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
1016 if (INSN_P (insn))
1017 CUID_INSN (i++) = insn;
1018
1019 /* Allocate vars to track sets of regs. */
1020 reg_set_bitmap = BITMAP_XMALLOC ();
1021
1022 /* Allocate vars to track sets of regs, memory per block. */
1023 reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (last_basic_block,
1024 max_gcse_regno);
1025 /* Allocate array to keep a list of insns which modify memory in each
1026 basic block. */
1027 modify_mem_list = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
1028 canon_modify_mem_list = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
1029 memset ((char *) modify_mem_list, 0, last_basic_block * sizeof (rtx));
1030 memset ((char *) canon_modify_mem_list, 0, last_basic_block * sizeof (rtx));
1031 modify_mem_list_set = BITMAP_XMALLOC ();
1032 canon_modify_mem_list_set = BITMAP_XMALLOC ();
1033 }
1034
1035 /* Free memory allocated by alloc_gcse_mem. */
1036
1037 static void
1038 free_gcse_mem ()
1039 {
1040 free (uid_cuid);
1041 free (cuid_insn);
1042
1043 BITMAP_XFREE (reg_set_bitmap);
1044
1045 sbitmap_vector_free (reg_set_in_block);
1046 free_modify_mem_tables ();
1047 BITMAP_XFREE (modify_mem_list_set);
1048 BITMAP_XFREE (canon_modify_mem_list_set);
1049 }
1050
1051 /* Many of the global optimization algorithms work by solving dataflow
1052 equations for various expressions. Initially, some local value is
1053 computed for each expression in each block. Then, the values across the
1054 various blocks are combined (by following flow graph edges) to arrive at
1055 global values. Conceptually, each set of equations is independent. We
1056 may therefore solve all the equations in parallel, solve them one at a
1057 time, or pick any intermediate approach.
1058
1059 When you're going to need N two-dimensional bitmaps, each X (say, the
1060 number of blocks) by Y (say, the number of expressions), call this
1061 function. It's not important what X and Y represent; only that Y
1062 correspond to the things that can be done in parallel. This function will
1063 return an appropriate chunking factor C; you should solve C sets of
1064 equations in parallel. By going through this function, we can easily
1065 trade space against time; by solving fewer equations in parallel we use
1066 less space. */
1067
1068 static int
1069 get_bitmap_width (n, x, y)
1070 int n;
1071 int x;
1072 int y;
1073 {
1074 /* It's not really worth figuring out *exactly* how much memory will
1075 be used by a particular choice. The important thing is to get
1076 something approximately right. */
1077 size_t max_bitmap_memory = 10 * 1024 * 1024;
1078
1079 /* The number of bytes we'd use for a single column of minimum
1080 width. */
1081 size_t column_size = n * x * sizeof (SBITMAP_ELT_TYPE);
1082
1083 /* Often, it's reasonable just to solve all the equations in
1084 parallel. */
1085 if (column_size * SBITMAP_SET_SIZE (y) <= max_bitmap_memory)
1086 return y;
1087
1088 /* Otherwise, pick the largest width we can, without going over the
1089 limit. */
1090 return SBITMAP_ELT_BITS * ((max_bitmap_memory + column_size - 1)
1091 / column_size);
1092 }
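/* Illustrative sketch, not part of GCC: using the chunking factor.  With
   N bitmaps each LAST_BASIC_BLOCK blocks by NREGS registers, solve the
   equations for at most WIDTH registers at a time so the live bitmaps
   stay under the memory cap; the null-pointer-check code later in this
   file partitions registers this way.  */
#if 0
static void
sketch_solve_in_chunks (nregs)
     int nregs;
{
  int width = get_bitmap_width (4, last_basic_block, nregs);
  int reg;

  for (reg = 0; reg < nregs; reg += width)
    {
      /* Allocate last_basic_block x MIN (width, nregs - reg) bitmaps,
         solve this slice's dataflow equations, then free them.  */
    }
}
#endif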
1093 \f
1094 /* Compute the local properties of each recorded expression.
1095
1096 Local properties are those that are defined by the block, irrespective of
1097 other blocks.
1098
1099 An expression is transparent in a block if its operands are not modified
1100 in the block.
1101
1102 An expression is computed (locally available) in a block if it is computed
1103 at least once and the expression would contain the same value if the
1104 computation were moved to the end of the block.
1105
1106 An expression is locally anticipatable in a block if it is computed at
1107 least once and the expression would contain the same value if the
1108 computation were moved to the beginning of the block.
1109
1110 We call this routine for cprop, pre and code hoisting. They all compute
1111 basically the same information and thus can easily share this code.
1112
1113 TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
1114 properties. If NULL, then it is not necessary to compute or record that
1115 particular property.
1116
1117 SETP controls which hash table to look at. If zero, this routine looks at
1118 the expr hash table; if nonzero this routine looks at the set hash table.
1119 Additionally, TRANSP is computed as ~TRANSP, since this is really cprop's
1120 ABSALTERED. */
1121
1122 static void
1123 compute_local_properties (transp, comp, antloc, setp)
1124 sbitmap *transp;
1125 sbitmap *comp;
1126 sbitmap *antloc;
1127 int setp;
1128 {
1129 unsigned int i, hash_table_size;
1130 struct expr **hash_table;
1131
1132 /* Initialize any bitmaps that were passed in. */
1133 if (transp)
1134 {
1135 if (setp)
1136 sbitmap_vector_zero (transp, last_basic_block);
1137 else
1138 sbitmap_vector_ones (transp, last_basic_block);
1139 }
1140
1141 if (comp)
1142 sbitmap_vector_zero (comp, last_basic_block);
1143 if (antloc)
1144 sbitmap_vector_zero (antloc, last_basic_block);
1145
1146 /* We use the same code for cprop, pre and hoisting. For cprop
1147 we care about the set hash table, for pre and hoisting we
1148 care about the expr hash table. */
1149 hash_table_size = setp ? set_hash_table_size : expr_hash_table_size;
1150 hash_table = setp ? set_hash_table : expr_hash_table;
1151
1152 for (i = 0; i < hash_table_size; i++)
1153 {
1154 struct expr *expr;
1155
1156 for (expr = hash_table[i]; expr != NULL; expr = expr->next_same_hash)
1157 {
1158 int indx = expr->bitmap_index;
1159 struct occr *occr;
1160
1161 /* The expression is transparent in this block if it is not killed.
1162 We start by assuming all are transparent [none are killed], and
1163 then reset the bits for those that are. */
1164 if (transp)
1165 compute_transp (expr->expr, indx, transp, setp);
1166
1167 /* The occurrences recorded in antic_occr are exactly those that
1168 we want to set to non-zero in ANTLOC. */
1169 if (antloc)
1170 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
1171 {
1172 SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);
1173
1174 /* While we're scanning the table, this is a good place to
1175 initialize this. */
1176 occr->deleted_p = 0;
1177 }
1178
1179 /* The occurrences recorded in avail_occr are exactly those that
1180 we want to set to non-zero in COMP. */
1181 if (comp)
1182 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
1183 {
1184 SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);
1185
1186 /* While we're scanning the table, this is a good place to
1187 initialize this. */
1188 occr->copied_p = 0;
1189 }
1190
1191 /* While we're scanning the table, this is a good place to
1192 initialize this. */
1193 expr->reaching_reg = 0;
1194 }
1195 }
1196 }
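/* Illustrative sketch, not part of GCC: how the PRE setup calls the
   routine above -- local properties over the expression hash table
   (SETP == 0), which then feed the lazy-code-motion solver.  Here
   transp, comp and antloc stand for the sbitmap vectors (sized
   last_basic_block x n_exprs) that alloc_pre_mem provides.  */
#if 0
static void
sketch_compute_pre_local_data ()
{
  compute_local_properties (transp, comp, antloc, 0);
}
#endif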
1197 \f
1198 /* Register set information.
1199
1200 `reg_set_table' records where each register is set or otherwise
1201 modified. */
1202
1203 static struct obstack reg_set_obstack;
1204
1205 static void
1206 alloc_reg_set_mem (n_regs)
1207 int n_regs;
1208 {
1209 unsigned int n;
1210
1211 reg_set_table_size = n_regs + REG_SET_TABLE_SLOP;
1212 n = reg_set_table_size * sizeof (struct reg_set *);
1213 reg_set_table = (struct reg_set **) gmalloc (n);
1214 memset ((char *) reg_set_table, 0, n);
1215
1216 gcc_obstack_init (&reg_set_obstack);
1217 }
1218
1219 static void
1220 free_reg_set_mem ()
1221 {
1222 free (reg_set_table);
1223 obstack_free (&reg_set_obstack, NULL);
1224 }
1225
1226 /* Record REGNO in the reg_set table. */
1227
1228 static void
1229 record_one_set (regno, insn)
1230 int regno;
1231 rtx insn;
1232 {
1233 /* Allocate a new reg_set element and link it onto the list. */
1234 struct reg_set *new_reg_info;
1235
1236 /* If the table isn't big enough, enlarge it. */
1237 if (regno >= reg_set_table_size)
1238 {
1239 int new_size = regno + REG_SET_TABLE_SLOP;
1240
1241 reg_set_table
1242 = (struct reg_set **) grealloc ((char *) reg_set_table,
1243 new_size * sizeof (struct reg_set *));
1244 memset ((char *) (reg_set_table + reg_set_table_size), 0,
1245 (new_size - reg_set_table_size) * sizeof (struct reg_set *));
1246 reg_set_table_size = new_size;
1247 }
1248
1249 new_reg_info = (struct reg_set *) obstack_alloc (&reg_set_obstack,
1250 sizeof (struct reg_set));
1251 bytes_used += sizeof (struct reg_set);
1252 new_reg_info->insn = insn;
1253 new_reg_info->next = reg_set_table[regno];
1254 reg_set_table[regno] = new_reg_info;
1255 }
1256
1257 /* Called from compute_sets via note_stores to handle one SET or CLOBBER in
1258 an insn. The DATA is really the instruction in which the SET is
1259 occurring. */
1260
1261 static void
1262 record_set_info (dest, setter, data)
1263 rtx dest, setter ATTRIBUTE_UNUSED;
1264 void *data;
1265 {
1266 rtx record_set_insn = (rtx) data;
1267
1268 if (GET_CODE (dest) == REG && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
1269 record_one_set (REGNO (dest), record_set_insn);
1270 }
1271
1272 /* Scan the function and record each set of each pseudo-register.
1273
1274 This is called once, at the start of the gcse pass. See the comments for
1275 `reg_set_table' for further documentation. */
1276
1277 static void
1278 compute_sets (f)
1279 rtx f;
1280 {
1281 rtx insn;
1282
1283 for (insn = f; insn != 0; insn = NEXT_INSN (insn))
1284 if (INSN_P (insn))
1285 note_stores (PATTERN (insn), record_set_info, insn);
1286 }
1287 \f
1288 /* Hash table support. */
1289
1290 /* For each register, the cuid of the first/last insn in the block
1291 that set it, or -1 if not set. */
1292 #define NEVER_SET -1
1293
1294 struct reg_avail_info
1295 {
1296 basic_block last_bb;
1297 int first_set;
1298 int last_set;
1299 };
1300
1301 static struct reg_avail_info *reg_avail_info;
1302 static basic_block current_bb;
1303
1304
1305 /* See whether X, the source of a set, is something we want to consider for
1306 GCSE. */
1307
1308 static GTY(()) rtx test_insn;
1309 static int
1310 want_to_gcse_p (x)
1311 rtx x;
1312 {
1313 int num_clobbers = 0;
1314 int icode;
1315
1316 switch (GET_CODE (x))
1317 {
1318 case REG:
1319 case SUBREG:
1320 case CONST_INT:
1321 case CONST_DOUBLE:
1322 case CONST_VECTOR:
1323 case CALL:
1324 return 0;
1325
1326 default:
1327 break;
1328 }
1329
1330 /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */
1331 if (general_operand (x, GET_MODE (x)))
1332 return 1;
1333 else if (GET_MODE (x) == VOIDmode)
1334 return 0;
1335
1336 /* Otherwise, check if we can make a valid insn from it. First initialize
1337 our test insn if we haven't already. */
1338 if (test_insn == 0)
1339 {
1340 test_insn
1341 = make_insn_raw (gen_rtx_SET (VOIDmode,
1342 gen_rtx_REG (word_mode,
1343 FIRST_PSEUDO_REGISTER * 2),
1344 const0_rtx));
1345 NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
1346 }
1347
1348 /* Now make an insn like the one we would make when GCSE'ing and see if
1349 valid. */
1350 PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
1351 SET_SRC (PATTERN (test_insn)) = x;
1352 return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0
1353 && (num_clobbers == 0 || ! added_clobbers_hard_reg_p (icode)));
1354 }
1355
1356 /* Return non-zero if the operands of expression X are unchanged from the
1357 start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
1358 or from INSN to the end of INSN's basic block (if AVAIL_P != 0). */
1359
1360 static int
1361 oprs_unchanged_p (x, insn, avail_p)
1362 rtx x, insn;
1363 int avail_p;
1364 {
1365 int i, j;
1366 enum rtx_code code;
1367 const char *fmt;
1368
1369 if (x == 0)
1370 return 1;
1371
1372 code = GET_CODE (x);
1373 switch (code)
1374 {
1375 case REG:
1376 {
1377 struct reg_avail_info *info = &reg_avail_info[REGNO (x)];
1378
1379 if (info->last_bb != current_bb)
1380 return 1;
1381 if (avail_p)
1382 return info->last_set < INSN_CUID (insn);
1383 else
1384 return info->first_set >= INSN_CUID (insn);
1385 }
1386
1387 case MEM:
1388 if (load_killed_in_block_p (current_bb, INSN_CUID (insn),
1389 x, avail_p))
1390 return 0;
1391 else
1392 return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);
1393
1394 case PRE_DEC:
1395 case PRE_INC:
1396 case POST_DEC:
1397 case POST_INC:
1398 case PRE_MODIFY:
1399 case POST_MODIFY:
1400 return 0;
1401
1402 case PC:
1403 case CC0: /*FIXME*/
1404 case CONST:
1405 case CONST_INT:
1406 case CONST_DOUBLE:
1407 case CONST_VECTOR:
1408 case SYMBOL_REF:
1409 case LABEL_REF:
1410 case ADDR_VEC:
1411 case ADDR_DIFF_VEC:
1412 return 1;
1413
1414 default:
1415 break;
1416 }
1417
1418 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1419 {
1420 if (fmt[i] == 'e')
1421 {
1422 /* If we are about to do the last recursive call needed at this
1423 level, change it into iteration. This function is called enough
1424 to be worth it. */
1425 if (i == 0)
1426 return oprs_unchanged_p (XEXP (x, i), insn, avail_p);
1427
1428 else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
1429 return 0;
1430 }
1431 else if (fmt[i] == 'E')
1432 for (j = 0; j < XVECLEN (x, i); j++)
1433 if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
1434 return 0;
1435 }
1436
1437 return 1;
1438 }
1439
1440 /* Used for communication between mems_conflict_for_gcse_p and
1441 load_killed_in_block_p. Nonzero if mems_conflict_for_gcse_p finds a
1442 conflict between two memory references. */
1443 static int gcse_mems_conflict_p;
1444
1445 /* Used for communication between mems_conflict_for_gcse_p and
1446 load_killed_in_block_p. This is the memory reference of a load
1447 instruction; mems_conflict_for_gcse_p checks whether a memory store
1448 conflicts with this load. */
1449 static rtx gcse_mem_operand;
1450
1451 /* DEST is the output of an instruction. If it is a memory reference, and
1452 possibly conflicts with the load found in gcse_mem_operand, then set
1453 gcse_mems_conflict_p to a nonzero value. */
1454
1455 static void
1456 mems_conflict_for_gcse_p (dest, setter, data)
1457 rtx dest, setter ATTRIBUTE_UNUSED;
1458 void *data ATTRIBUTE_UNUSED;
1459 {
1460 while (GET_CODE (dest) == SUBREG
1461 || GET_CODE (dest) == ZERO_EXTRACT
1462 || GET_CODE (dest) == SIGN_EXTRACT
1463 || GET_CODE (dest) == STRICT_LOW_PART)
1464 dest = XEXP (dest, 0);
1465
1466 /* If DEST is not a MEM, then it will not conflict with the load. Note
1467 that function calls are assumed to clobber memory, but are handled
1468 elsewhere. */
1469 if (GET_CODE (dest) != MEM)
1470 return;
1471
1472 /* If we are setting a MEM in our list of specially recognized MEMs,
1473 don't mark it as killed this time. */
1474
1475 if (dest == gcse_mem_operand && pre_ldst_mems != NULL)
1476 {
1477 if (!find_rtx_in_ldst (dest))
1478 gcse_mems_conflict_p = 1;
1479 return;
1480 }
1481
1482 if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
1483 rtx_addr_varies_p))
1484 gcse_mems_conflict_p = 1;
1485 }
1486
1487 /* Return nonzero if the expression in X (a memory reference) is killed
1488 in block BB before or after the insn with the CUID in UID_LIMIT.
1489 AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
1490 before UID_LIMIT.
1491
1492 To check the entire block, set UID_LIMIT to max_uid + 1 and
1493 AVAIL_P to 0. */
1494
1495 static int
1496 load_killed_in_block_p (bb, uid_limit, x, avail_p)
1497 basic_block bb;
1498 int uid_limit;
1499 rtx x;
1500 int avail_p;
1501 {
1502 rtx list_entry = modify_mem_list[bb->index];
1503 while (list_entry)
1504 {
1505 rtx setter;
1506 /* Ignore entries in the list that do not apply. */
1507 if ((avail_p
1508 && INSN_CUID (XEXP (list_entry, 0)) < uid_limit)
1509 || (! avail_p
1510 && INSN_CUID (XEXP (list_entry, 0)) > uid_limit))
1511 {
1512 list_entry = XEXP (list_entry, 1);
1513 continue;
1514 }
1515
1516 setter = XEXP (list_entry, 0);
1517
1518 /* If SETTER is a call everything is clobbered. Note that calls
1519 to pure functions are never put on the list, so we need not
1520 worry about them. */
1521 if (GET_CODE (setter) == CALL_INSN)
1522 return 1;
1523
1524 /* SETTER must be an INSN of some kind that sets memory. Call
1525 note_stores to examine each hunk of memory that is modified.
1526
1527 The note_stores interface is pretty limited, so we have to
1528 communicate via global variables. Yuk. */
1529 gcse_mem_operand = x;
1530 gcse_mems_conflict_p = 0;
1531 note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
1532 if (gcse_mems_conflict_p)
1533 return 1;
1534 list_entry = XEXP (list_entry, 1);
1535 }
1536 return 0;
1537 }
1538
1539 /* Return non-zero if the operands of expression X are unchanged from
1540 the start of INSN's basic block up to but not including INSN. */
1541
1542 static int
1543 oprs_anticipatable_p (x, insn)
1544 rtx x, insn;
1545 {
1546 return oprs_unchanged_p (x, insn, 0);
1547 }
1548
1549 /* Return non-zero if the operands of expression X are unchanged from
1550 INSN to the end of INSN's basic block. */
1551
1552 static int
1553 oprs_available_p (x, insn)
1554 rtx x, insn;
1555 {
1556 return oprs_unchanged_p (x, insn, 1);
1557 }
1558
1559 /* Hash expression X.
1560
1561 MODE is only used if X is a CONST_INT. DO_NOT_RECORD_P is a boolean
1562 indicating if a volatile operand is found or if the expression contains
1563 something we don't want to insert in the table.
1564
1565 ??? One might want to merge this with canon_hash. Later. */
1566
1567 static unsigned int
1568 hash_expr (x, mode, do_not_record_p, hash_table_size)
1569 rtx x;
1570 enum machine_mode mode;
1571 int *do_not_record_p;
1572 int hash_table_size;
1573 {
1574 unsigned int hash;
1575
1576 *do_not_record_p = 0;
1577
1578 hash = hash_expr_1 (x, mode, do_not_record_p);
1579 return hash % hash_table_size;
1580 }
1581
1582 /* Hash a string. Just add its bytes up. */
1583
1584 static inline unsigned
1585 hash_string_1 (ps)
1586 const char *ps;
1587 {
1588 unsigned hash = 0;
1589 const unsigned char *p = (const unsigned char *) ps;
1590
1591 if (p)
1592 while (*p)
1593 hash += *p++;
1594
1595 return hash;
1596 }
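/* Worked example, not part of GCC: the additive hash above ignores byte
   order, so anagrams collide -- hash_string_1 ("ab") and
   hash_string_1 ("ba") are both 'a' + 'b' == 97 + 98 == 195.  That is
   acceptable here, since it is only used to spread ASM_OPERANDS strings
   across buckets.  */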
1597
1598 /* Subroutine of hash_expr to do the actual work. */
1599
1600 static unsigned int
1601 hash_expr_1 (x, mode, do_not_record_p)
1602 rtx x;
1603 enum machine_mode mode;
1604 int *do_not_record_p;
1605 {
1606 int i, j;
1607 unsigned hash = 0;
1608 enum rtx_code code;
1609 const char *fmt;
1610
1611 /* Used to turn recursion into iteration. We can't rely on GCC's
1612 tail-recursion elimination since we need to keep accumulating values
1613 in HASH. */
1614
1615 if (x == 0)
1616 return hash;
1617
1618 repeat:
1619 code = GET_CODE (x);
1620 switch (code)
1621 {
1622 case REG:
1623 hash += ((unsigned int) REG << 7) + REGNO (x);
1624 return hash;
1625
1626 case CONST_INT:
1627 hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode
1628 + (unsigned int) INTVAL (x));
1629 return hash;
1630
1631 case CONST_DOUBLE:
1632 /* This is like the general case, except that it only hashes
1633 the integers representing the constant. */
1634 hash += (unsigned int) code + (unsigned int) GET_MODE (x);
1635 if (GET_MODE (x) != VOIDmode)
1636 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
1637 hash += (unsigned int) XWINT (x, i);
1638 else
1639 hash += ((unsigned int) CONST_DOUBLE_LOW (x)
1640 + (unsigned int) CONST_DOUBLE_HIGH (x));
1641 return hash;
1642
1643 case CONST_VECTOR:
1644 {
1645 int units;
1646 rtx elt;
1647
1648 units = CONST_VECTOR_NUNITS (x);
1649
1650 for (i = 0; i < units; ++i)
1651 {
1652 elt = CONST_VECTOR_ELT (x, i);
1653 hash += hash_expr_1 (elt, GET_MODE (elt), do_not_record_p);
1654 }
1655
1656 return hash;
1657 }
1658
1659 /* Assume there is only one rtx object for any given label. */
1660 case LABEL_REF:
1661 /* We don't hash on the address of the CODE_LABEL to avoid bootstrap
1662 differences and differences between each stage's debugging dumps. */
1663 hash += (((unsigned int) LABEL_REF << 7)
1664 + CODE_LABEL_NUMBER (XEXP (x, 0)));
1665 return hash;
1666
1667 case SYMBOL_REF:
1668 {
1669 /* Don't hash on the symbol's address to avoid bootstrap differences.
1670 Different hash values may cause expressions to be recorded in
1671 different orders and thus different registers to be used in the
1672 final assembler. This also avoids differences in the dump files
1673 between various stages. */
1674 unsigned int h = 0;
1675 const unsigned char *p = (const unsigned char *) XSTR (x, 0);
1676
1677 while (*p)
1678 h += (h << 7) + *p++; /* ??? revisit */
1679
1680 hash += ((unsigned int) SYMBOL_REF << 7) + h;
1681 return hash;
1682 }
1683
1684 case MEM:
1685 if (MEM_VOLATILE_P (x))
1686 {
1687 *do_not_record_p = 1;
1688 return 0;
1689 }
1690
1691 hash += (unsigned int) MEM;
1692 /* We used to use the alias set for hashing, but this is not good, since
1693 the alias set may differ between the -fprofile-arcs and
1694 -fbranch-probabilities compilations, causing the profiles to fail to match. */
1695 x = XEXP (x, 0);
1696 goto repeat;
1697
1698 case PRE_DEC:
1699 case PRE_INC:
1700 case POST_DEC:
1701 case POST_INC:
1702 case PC:
1703 case CC0:
1704 case CALL:
1705 case UNSPEC_VOLATILE:
1706 *do_not_record_p = 1;
1707 return 0;
1708
1709 case ASM_OPERANDS:
1710 if (MEM_VOLATILE_P (x))
1711 {
1712 *do_not_record_p = 1;
1713 return 0;
1714 }
1715 else
1716 {
1717 /* We don't want to take the filename and line into account. */
1718 hash += (unsigned) code + (unsigned) GET_MODE (x)
1719 + hash_string_1 (ASM_OPERANDS_TEMPLATE (x))
1720 + hash_string_1 (ASM_OPERANDS_OUTPUT_CONSTRAINT (x))
1721 + (unsigned) ASM_OPERANDS_OUTPUT_IDX (x);
1722
1723 if (ASM_OPERANDS_INPUT_LENGTH (x))
1724 {
1725 for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
1726 {
1727 hash += (hash_expr_1 (ASM_OPERANDS_INPUT (x, i),
1728 GET_MODE (ASM_OPERANDS_INPUT (x, i)),
1729 do_not_record_p)
1730 + hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT
1731 (x, i)));
1732 }
1733
1734 hash += hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0));
1735 x = ASM_OPERANDS_INPUT (x, 0);
1736 mode = GET_MODE (x);
1737 goto repeat;
1738 }
1739 return hash;
1740 }
1741
1742 default:
1743 break;
1744 }
1745
1746 hash += (unsigned) code + (unsigned) GET_MODE (x);
1747 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1748 {
1749 if (fmt[i] == 'e')
1750 {
1751 /* If we are about to do the last recursive call
1752 needed at this level, change it into iteration.
1753 This function is called enough to be worth it. */
1754 if (i == 0)
1755 {
1756 x = XEXP (x, i);
1757 goto repeat;
1758 }
1759
1760 hash += hash_expr_1 (XEXP (x, i), 0, do_not_record_p);
1761 if (*do_not_record_p)
1762 return 0;
1763 }
1764
1765 else if (fmt[i] == 'E')
1766 for (j = 0; j < XVECLEN (x, i); j++)
1767 {
1768 hash += hash_expr_1 (XVECEXP (x, i, j), 0, do_not_record_p);
1769 if (*do_not_record_p)
1770 return 0;
1771 }
1772
1773 else if (fmt[i] == 's')
1774 hash += hash_string_1 (XSTR (x, i));
1775 else if (fmt[i] == 'i')
1776 hash += (unsigned int) XINT (x, i);
1777 else
1778 abort ();
1779 }
1780
1781 return hash;
1782 }
1783
1784 /* Hash a set of register REGNO.
1785
1786 Sets are hashed on the register that is set. This simplifies the PRE copy
1787 propagation code.
1788
1789 ??? May need to make things more elaborate. Later, as necessary. */
1790
1791 static unsigned int
1792 hash_set (regno, hash_table_size)
1793 int regno;
1794 int hash_table_size;
1795 {
1796 unsigned int hash;
1797
1798 hash = regno;
1799 return hash % hash_table_size;
1800 }
1801
1802 /* Return non-zero if exp1 is equivalent to exp2.
1803 ??? Borrowed from cse.c. Might want to remerge with cse.c. Later. */
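
/* E.g. the commutative cases below make (plus:SI (reg:SI 100) (reg:SI 101))
   and (plus:SI (reg:SI 101) (reg:SI 100)) compare equal, while the mode
   check rejects (mult:SI x y) against (mult:HI x y).  */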
1804
1805 static int
1806 expr_equiv_p (x, y)
1807 rtx x, y;
1808 {
1809 int i, j;
1810 enum rtx_code code;
1811 const char *fmt;
1812
1813 if (x == y)
1814 return 1;
1815
1816 if (x == 0 || y == 0)
1817 return x == y;
1818
1819 code = GET_CODE (x);
1820 if (code != GET_CODE (y))
1821 return 0;
1822
1823 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
1824 if (GET_MODE (x) != GET_MODE (y))
1825 return 0;
1826
1827 switch (code)
1828 {
1829 case PC:
1830 case CC0:
1831 return x == y;
1832
1833 case CONST_INT:
1834 return INTVAL (x) == INTVAL (y);
1835
1836 case LABEL_REF:
1837 return XEXP (x, 0) == XEXP (y, 0);
1838
1839 case SYMBOL_REF:
1840 return XSTR (x, 0) == XSTR (y, 0);
1841
1842 case REG:
1843 return REGNO (x) == REGNO (y);
1844
1845 case MEM:
1846 /* Can't merge two expressions in different alias sets, since we can
1847 decide that the expression is transparent in a block when it isn't,
1848 due to it being set with the different alias set. */
1849 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
1850 return 0;
1851 break;
1852
1853 /* For commutative operations, check both orders. */
1854 case PLUS:
1855 case MULT:
1856 case AND:
1857 case IOR:
1858 case XOR:
1859 case NE:
1860 case EQ:
1861 return ((expr_equiv_p (XEXP (x, 0), XEXP (y, 0))
1862 && expr_equiv_p (XEXP (x, 1), XEXP (y, 1)))
1863 || (expr_equiv_p (XEXP (x, 0), XEXP (y, 1))
1864 && expr_equiv_p (XEXP (x, 1), XEXP (y, 0))));
1865
1866 case ASM_OPERANDS:
1867 /* We don't use the generic code below because we want to
1868 disregard filename and line numbers. */
1869
1870 /* A volatile asm isn't equivalent to any other. */
1871 if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
1872 return 0;
1873
1874 if (GET_MODE (x) != GET_MODE (y)
1875 || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y))
1876 || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x),
1877 ASM_OPERANDS_OUTPUT_CONSTRAINT (y))
1878 || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y)
1879 || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y))
1880 return 0;
1881
1882 if (ASM_OPERANDS_INPUT_LENGTH (x))
1883 {
1884 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
1885 if (! expr_equiv_p (ASM_OPERANDS_INPUT (x, i),
1886 ASM_OPERANDS_INPUT (y, i))
1887 || strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i),
1888 ASM_OPERANDS_INPUT_CONSTRAINT (y, i)))
1889 return 0;
1890 }
1891
1892 return 1;
1893
1894 default:
1895 break;
1896 }
1897
1898 /* Compare the elements. If any pair of corresponding elements
1899 fail to match, return 0 for the whole thing. */
1900
1901 fmt = GET_RTX_FORMAT (code);
1902 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1903 {
1904 switch (fmt[i])
1905 {
1906 case 'e':
1907 if (! expr_equiv_p (XEXP (x, i), XEXP (y, i)))
1908 return 0;
1909 break;
1910
1911 case 'E':
1912 if (XVECLEN (x, i) != XVECLEN (y, i))
1913 return 0;
1914 for (j = 0; j < XVECLEN (x, i); j++)
1915 if (! expr_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j)))
1916 return 0;
1917 break;
1918
1919 case 's':
1920 if (strcmp (XSTR (x, i), XSTR (y, i)))
1921 return 0;
1922 break;
1923
1924 case 'i':
1925 if (XINT (x, i) != XINT (y, i))
1926 return 0;
1927 break;
1928
1929 case 'w':
1930 if (XWINT (x, i) != XWINT (y, i))
1931 return 0;
1932 break;
1933
1934 case '0':
1935 break;
1936
1937 default:
1938 abort ();
1939 }
1940 }
1941
1942 return 1;
1943 }
1944
1945 /* Insert expression X in INSN in the hash table.
1946 If it is already present, record it as the last occurrence in INSN's
1947 basic block.
1948
1949 MODE is the mode of the value X is being stored into.
1950 It is only used if X is a CONST_INT.
1951
1952 ANTIC_P is non-zero if X is an anticipatable expression.
1953 AVAIL_P is non-zero if X is an available expression. */
1954
1955 static void
1956 insert_expr_in_table (x, mode, insn, antic_p, avail_p)
1957 rtx x;
1958 enum machine_mode mode;
1959 rtx insn;
1960 int antic_p, avail_p;
1961 {
1962 int found, do_not_record_p;
1963 unsigned int hash;
1964 struct expr *cur_expr, *last_expr = NULL;
1965 struct occr *antic_occr, *avail_occr;
1966 struct occr *last_occr = NULL;
1967
1968 hash = hash_expr (x, mode, &do_not_record_p, expr_hash_table_size);
1969
1970 /* Do not insert expression in table if it contains volatile operands,
1971 or if hash_expr determines the expression is something we don't want
1972 to or can't handle. */
1973 if (do_not_record_p)
1974 return;
1975
1976 cur_expr = expr_hash_table[hash];
1977 found = 0;
1978
1979 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1980 {
1981 /* If the expression isn't found, save a pointer to the end of
1982 the list. */
1983 last_expr = cur_expr;
1984 cur_expr = cur_expr->next_same_hash;
1985 }
1986
1987 if (! found)
1988 {
1989 cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
1990 bytes_used += sizeof (struct expr);
1991 if (expr_hash_table[hash] == NULL)
1992 /* This is the first pattern that hashed to this index. */
1993 expr_hash_table[hash] = cur_expr;
1994 else
1995 /* Add EXPR to end of this hash chain. */
1996 last_expr->next_same_hash = cur_expr;
1997
1998 /* Set the fields of the expr element. */
1999 cur_expr->expr = x;
2000 cur_expr->bitmap_index = n_exprs++;
2001 cur_expr->next_same_hash = NULL;
2002 cur_expr->antic_occr = NULL;
2003 cur_expr->avail_occr = NULL;
2004 }
2005
2006 /* Now record the occurrence(s). */
2007 if (antic_p)
2008 {
2009 antic_occr = cur_expr->antic_occr;
2010
2011 /* Search for another occurrence in the same basic block. */
2012 while (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
2013 {
2014 /* If an occurrence isn't found, save a pointer to the end of
2015 the list. */
2016 last_occr = antic_occr;
2017 antic_occr = antic_occr->next;
2018 }
2019
2020 if (antic_occr)
2021 /* Found another instance of the expression in the same basic block.
2022 Prefer the currently recorded one. We want the first one in the
2023 block and the block is scanned from start to end. */
2024 ; /* nothing to do */
2025 else
2026 {
2027 /* First occurrence of this expression in this basic block. */
2028 antic_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2029 bytes_used += sizeof (struct occr);
2030 /* First occurrence of this expression in any block? */
2031 if (cur_expr->antic_occr == NULL)
2032 cur_expr->antic_occr = antic_occr;
2033 else
2034 last_occr->next = antic_occr;
2035
2036 antic_occr->insn = insn;
2037 antic_occr->next = NULL;
2038 }
2039 }
2040
2041 if (avail_p)
2042 {
2043 avail_occr = cur_expr->avail_occr;
2044
2045 /* Search for another occurrence in the same basic block. */
2046 while (avail_occr && BLOCK_NUM (avail_occr->insn) != BLOCK_NUM (insn))
2047 {
2048 /* If an occurrence isn't found, save a pointer to the end of
2049 the list. */
2050 last_occr = avail_occr;
2051 avail_occr = avail_occr->next;
2052 }
2053
2054 if (avail_occr)
2055 /* Found another instance of the expression in the same basic block.
2056 Prefer this occurrence to the currently recorded one. We want
2057 the last one in the block and the block is scanned from start
2058 to end. */
2059 avail_occr->insn = insn;
2060 else
2061 {
2062 /* First occurrence of this expression in this basic block. */
2063 avail_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2064 bytes_used += sizeof (struct occr);
2065
2066 /* First occurrence of this expression in any block? */
2067 if (cur_expr->avail_occr == NULL)
2068 cur_expr->avail_occr = avail_occr;
2069 else
2070 last_occr->next = avail_occr;
2071
2072 avail_occr->insn = insn;
2073 avail_occr->next = NULL;
2074 }
2075 }
2076 }
2077
2078 /* Insert pattern X in INSN in the hash table.
2079 X is a SET of a reg to either another reg or a constant.
2080 If it is already present, record it as the last occurrence in INSN's
2081 basic block. */
2082
2083 static void
2084 insert_set_in_table (x, insn)
2085 rtx x;
2086 rtx insn;
2087 {
2088 int found;
2089 unsigned int hash;
2090 struct expr *cur_expr, *last_expr = NULL;
2091 struct occr *cur_occr, *last_occr = NULL;
2092
2093 if (GET_CODE (x) != SET
2094 || GET_CODE (SET_DEST (x)) != REG)
2095 abort ();
2096
2097 hash = hash_set (REGNO (SET_DEST (x)), set_hash_table_size);
2098
2099 cur_expr = set_hash_table[hash];
2100 found = 0;
2101
2102 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
2103 {
2104 /* If the expression isn't found, save a pointer to the end of
2105 the list. */
2106 last_expr = cur_expr;
2107 cur_expr = cur_expr->next_same_hash;
2108 }
2109
2110 if (! found)
2111 {
2112 cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
2113 bytes_used += sizeof (struct expr);
2114 if (set_hash_table[hash] == NULL)
2115 /* This is the first pattern that hashed to this index. */
2116 set_hash_table[hash] = cur_expr;
2117 else
2118 /* Add EXPR to end of this hash chain. */
2119 last_expr->next_same_hash = cur_expr;
2120
2121 /* Set the fields of the expr element.
2122 We must copy X because it can be modified when copy propagation is
2123 performed on its operands. */
2124 cur_expr->expr = copy_rtx (x);
2125 cur_expr->bitmap_index = n_sets++;
2126 cur_expr->next_same_hash = NULL;
2127 cur_expr->antic_occr = NULL;
2128 cur_expr->avail_occr = NULL;
2129 }
2130
2131 /* Now record the occurrence. */
2132 cur_occr = cur_expr->avail_occr;
2133
2134 /* Search for another occurrence in the same basic block. */
2135 while (cur_occr && BLOCK_NUM (cur_occr->insn) != BLOCK_NUM (insn))
2136 {
2137 /* If an occurrence isn't found, save a pointer to the end of
2138 the list. */
2139 last_occr = cur_occr;
2140 cur_occr = cur_occr->next;
2141 }
2142
2143 if (cur_occr)
2144 /* Found another instance of the expression in the same basic block.
2145 Prefer this occurrence to the currently recorded one. We want the
2146 last one in the block and the block is scanned from start to end. */
2147 cur_occr->insn = insn;
2148 else
2149 {
2150 /* First occurrence of this expression in this basic block. */
2151 cur_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2152 bytes_used += sizeof (struct occr);
2153
2154 /* First occurrence of this expression in any block? */
2155 if (cur_expr->avail_occr == NULL)
2156 cur_expr->avail_occr = cur_occr;
2157 else
2158 last_occr->next = cur_occr;
2159
2160 cur_occr->insn = insn;
2161 cur_occr->next = NULL;
2162 }
2163 }
2164
2165 /* Scan pattern PAT of INSN and add an entry to the hash table. If SET_P is
2166 non-zero, this is for the assignment hash table, otherwise it is for the
2167 expression hash table. */
2168
2169 static void
2170 hash_scan_set (pat, insn, set_p)
2171 rtx pat, insn;
2172 int set_p;
2173 {
2174 rtx src = SET_SRC (pat);
2175 rtx dest = SET_DEST (pat);
2176 rtx note;
2177
2178 if (GET_CODE (src) == CALL)
2179 hash_scan_call (src, insn);
2180
2181 else if (GET_CODE (dest) == REG)
2182 {
2183 unsigned int regno = REGNO (dest);
2184 rtx tmp;
2185
2186 /* If this is a single set and we are doing constant propagation,
2187 see if a REG_NOTE shows this equivalent to a constant. */
2188 if (set_p && (note = find_reg_equal_equiv_note (insn)) != 0
2189 && CONSTANT_P (XEXP (note, 0)))
2190 src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);
2191
2192 /* Only record sets of pseudo-regs in the hash table. */
2193 if (! set_p
2194 && regno >= FIRST_PSEUDO_REGISTER
2195 /* Don't GCSE something if we can't do a reg/reg copy. */
2196 && can_copy_p [GET_MODE (dest)]
2197 /* GCSE commonly inserts instructions after the insn. We can't
2198 do that easily for insns with EH_REGION notes, so disable
2199 GCSE on these for now. */
2200 && !find_reg_note (insn, REG_EH_REGION, NULL_RTX)
2201 /* Is SET_SRC something we want to gcse? */
2202 && want_to_gcse_p (src)
2203 /* Don't CSE a nop. */
2204 && ! set_noop_p (pat)
2205 /* Don't GCSE if it has an attached REG_EQUIV note.
2206 At this point only function parameters should have
2207 REG_EQUIV notes, and if the argument slot is used somewhere
2208 explicitly, it means the address of the parameter has been
2209 taken, so we should not extend the lifetime of the pseudo. */
2210 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
2211 || GET_CODE (XEXP (note, 0)) != MEM))
2212 {
2213 /* An expression is not anticipatable if its operands are
2214 modified before this insn or if this is not the only SET in
2215 this insn. */
2216 int antic_p = oprs_anticipatable_p (src, insn) && single_set (insn);
2217 /* An expression is not available if its operands are
2218 subsequently modified, including this insn. It's also not
2219 available if this is a branch, because we can't insert
2220 a set after the branch. */
2221 int avail_p = (oprs_available_p (src, insn)
2222 && ! JUMP_P (insn));
2223
2224 insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p);
2225 }
2226
2227 /* Record sets for constant/copy propagation. */
2228 else if (set_p
2229 && regno >= FIRST_PSEUDO_REGISTER
2230 && ((GET_CODE (src) == REG
2231 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2232 && can_copy_p [GET_MODE (dest)]
2233 && REGNO (src) != regno)
2234 || CONSTANT_P (src))
2235 /* A copy is not available if its src or dest is subsequently
2236 modified. Here we want to search from INSN+1 on, but
2237 oprs_available_p searches from INSN on. */
2238 && (insn == BLOCK_END (BLOCK_NUM (insn))
2239 || ((tmp = next_nonnote_insn (insn)) != NULL_RTX
2240 && oprs_available_p (pat, tmp))))
2241 insert_set_in_table (pat, insn);
2242 }
2243 }
2244
2245 static void
2246 hash_scan_clobber (x, insn)
2247 rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
2248 {
2249 /* Currently nothing to do. */
2250 }
2251
2252 static void
2253 hash_scan_call (x, insn)
2254 rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
2255 {
2256 /* Currently nothing to do. */
2257 }
2258
2259 /* Process INSN and add hash table entries as appropriate.
2260
2261 Only available expressions that set a single pseudo-reg are recorded.
2262
2263 Single sets in a PARALLEL could be handled, but it's an extra complication
2264 that isn't dealt with right now. The trick is handling the CLOBBERs that
2265 are also in the PARALLEL. Later.
2266
2267 If SET_P is non-zero, this is for the assignment hash table,
2268 otherwise it is for the expression hash table.
2269 If IN_LIBCALL_BLOCK nonzero, we are in a libcall block, and should
2270 not record any expressions. */
2271
2272 static void
2273 hash_scan_insn (insn, set_p, in_libcall_block)
2274 rtx insn;
2275 int set_p;
2276 int in_libcall_block;
2277 {
2278 rtx pat = PATTERN (insn);
2279 int i;
2280
2281 if (in_libcall_block)
2282 return;
2283
2284 /* Pick out the sets of INSN and for other forms of instructions record
2285 what's been modified. */
2286
2287 if (GET_CODE (pat) == SET)
2288 hash_scan_set (pat, insn, set_p);
2289 else if (GET_CODE (pat) == PARALLEL)
2290 for (i = 0; i < XVECLEN (pat, 0); i++)
2291 {
2292 rtx x = XVECEXP (pat, 0, i);
2293
2294 if (GET_CODE (x) == SET)
2295 hash_scan_set (x, insn, set_p);
2296 else if (GET_CODE (x) == CLOBBER)
2297 hash_scan_clobber (x, insn);
2298 else if (GET_CODE (x) == CALL)
2299 hash_scan_call (x, insn);
2300 }
2301
2302 else if (GET_CODE (pat) == CLOBBER)
2303 hash_scan_clobber (pat, insn);
2304 else if (GET_CODE (pat) == CALL)
2305 hash_scan_call (pat, insn);
2306 }
2307
2308 static void
2309 dump_hash_table (file, name, table, table_size, total_size)
2310 FILE *file;
2311 const char *name;
2312 struct expr **table;
2313 int table_size, total_size;
2314 {
2315 int i;
2316 /* Flattened out table, so it's printed in proper order. */
2317 struct expr **flat_table;
2318 unsigned int *hash_val;
2319 struct expr *expr;
2320
2321 flat_table
2322 = (struct expr **) xcalloc (total_size, sizeof (struct expr *));
2323 hash_val = (unsigned int *) xmalloc (total_size * sizeof (unsigned int));
2324
2325 for (i = 0; i < table_size; i++)
2326 for (expr = table[i]; expr != NULL; expr = expr->next_same_hash)
2327 {
2328 flat_table[expr->bitmap_index] = expr;
2329 hash_val[expr->bitmap_index] = i;
2330 }
2331
2332 fprintf (file, "%s hash table (%d buckets, %d entries)\n",
2333 name, table_size, total_size);
2334
2335 for (i = 0; i < total_size; i++)
2336 if (flat_table[i] != 0)
2337 {
2338 expr = flat_table[i];
2339 fprintf (file, "Index %d (hash value %d)\n ",
2340 expr->bitmap_index, hash_val[i]);
2341 print_rtl (file, expr->expr);
2342 fprintf (file, "\n");
2343 }
2344
2345 fprintf (file, "\n");
2346
2347 free (flat_table);
2348 free (hash_val);
2349 }
2350
2351 /* Record register first/last/block set information for REGNO in INSN.
2352
2353 first_set records the first place in the block where the register
2354 is set and is used to compute "anticipatability".
2355
2356 last_set records the last place in the block where the register
2357 is set and is used to compute "availability".
2358
2359 last_bb records the block for which first_set and last_set are
2360 valid, as a quick test to invalidate them.
2361
2362 reg_set_in_block records whether the register is set in the block
2363 and is used to compute "transparency". */
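
/* The consumer of this information is oprs_unchanged_p (earlier in this
   file), which tests roughly (sketch only): if info->last_bb is stale, the
   register is not set in this block at all; otherwise availability needs
   info->last_set < INSN_CUID (insn) and anticipatability needs
   info->first_set >= INSN_CUID (insn).  */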
2364
2365 static void
2366 record_last_reg_set_info (insn, regno)
2367 rtx insn;
2368 int regno;
2369 {
2370 struct reg_avail_info *info = &reg_avail_info[regno];
2371 int cuid = INSN_CUID (insn);
2372
2373 info->last_set = cuid;
2374 if (info->last_bb != current_bb)
2375 {
2376 info->last_bb = current_bb;
2377 info->first_set = cuid;
2378 SET_BIT (reg_set_in_block[current_bb->index], regno);
2379 }
2380 }
2381
2382
2383 /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
2384 Note we store a pair of elements in the list, so they have to be
2385 taken off pairwise. */
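
/* A consumer must therefore advance two nodes per iteration; roughly
   (an illustrative sketch, not code used here):

     for (list = canon_modify_mem_list[bb]; list; list = XEXP (list, 1))
       {
         rtx dest = XEXP (list, 0);
         rtx dest_addr;

         list = XEXP (list, 1);
         dest_addr = XEXP (list, 0);
         ...
       }

   stopping early on a CALL_INSN node, which record_last_mem_set_info
   below inserts singly.  */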
2386
2387 static void
2388 canon_list_insert (dest, unused1, v_insn)
2389 rtx dest ATTRIBUTE_UNUSED;
2390 rtx unused1 ATTRIBUTE_UNUSED;
2391 void * v_insn;
2392 {
2393 rtx dest_addr, insn;
2394 int bb;
2395
2396 while (GET_CODE (dest) == SUBREG
2397 || GET_CODE (dest) == ZERO_EXTRACT
2398 || GET_CODE (dest) == SIGN_EXTRACT
2399 || GET_CODE (dest) == STRICT_LOW_PART)
2400 dest = XEXP (dest, 0);
2401
2402 /* If DEST is not a MEM, then it will not conflict with a load. Note
2403 that function calls are assumed to clobber memory, but are handled
2404 elsewhere. */
2405
2406 if (GET_CODE (dest) != MEM)
2407 return;
2408
2409 dest_addr = get_addr (XEXP (dest, 0));
2410 dest_addr = canon_rtx (dest_addr);
2411 insn = (rtx) v_insn;
2412 bb = BLOCK_NUM (insn);
2413
2414 canon_modify_mem_list[bb] =
2415 alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
2416 canon_modify_mem_list[bb] =
2417 alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
2418 bitmap_set_bit (canon_modify_mem_list_set, bb);
2419 }
2420
2421 /* Record memory modification information for INSN. We do not actually care
2422 about the memory location(s) that are set, or even how they are set (consider
2423 a CALL_INSN). We merely need to record which insns modify memory. */
2424
2425 static void
2426 record_last_mem_set_info (insn)
2427 rtx insn;
2428 {
2429 int bb = BLOCK_NUM (insn);
2430
2431 /* load_killed_in_block_p will handle the case of calls clobbering
2432 everything. */
2433 modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
2434 bitmap_set_bit (modify_mem_list_set, bb);
2435
2436 if (GET_CODE (insn) == CALL_INSN)
2437 {
2438 /* Note that traversals of this list (other than for freeing)
2439 stop after encountering a CALL_INSN, so there's no need to
2440 insert a pair of items, as canon_list_insert does. */
2441 canon_modify_mem_list[bb] =
2442 alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
2443 bitmap_set_bit (canon_modify_mem_list_set, bb);
2444 }
2445 else
2446 note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
2447 }
2448
2449 /* Called from compute_hash_table via note_stores to handle one
2450 SET or CLOBBER in an insn. DATA is really the instruction in which
2451 the SET is taking place. */
2452
2453 static void
2454 record_last_set_info (dest, setter, data)
2455 rtx dest, setter ATTRIBUTE_UNUSED;
2456 void *data;
2457 {
2458 rtx last_set_insn = (rtx) data;
2459
2460 if (GET_CODE (dest) == SUBREG)
2461 dest = SUBREG_REG (dest);
2462
2463 if (GET_CODE (dest) == REG)
2464 record_last_reg_set_info (last_set_insn, REGNO (dest));
2465 else if (GET_CODE (dest) == MEM
2466 /* Ignore pushes, they clobber nothing. */
2467 && ! push_operand (dest, GET_MODE (dest)))
2468 record_last_mem_set_info (last_set_insn);
2469 }
2470
2471 /* Top level function to create an expression or assignment hash table.
2472
2473 Expression entries are placed in the hash table if
2474 - they are of the form (set (pseudo-reg) src),
2475 - src is something we want to perform GCSE on,
2476 - none of the operands are subsequently modified in the block
2477
2478 Assignment entries are placed in the hash table if
2479 - they are of the form (set (pseudo-reg) src),
2480 - src is something we want to perform const/copy propagation on,
2481 - none of the operands or target are subsequently modified in the block
2482
2483 Currently src must be a pseudo-reg or a const_int.
2484
2485 SET_P is non-zero for computing the assignment hash table. */
2487
2488 static void
2489 compute_hash_table (set_p)
2490 int set_p;
2491 {
2492 unsigned int i;
2493
2494 /* While we compute the hash table we also compute a bit array of which
2495 registers are set in which blocks.
2496 ??? This isn't needed during const/copy propagation, but it's cheap to
2497 compute. Later. */
2498 sbitmap_vector_zero (reg_set_in_block, last_basic_block);
2499
2500 /* Re-cache any INSN_LIST nodes we have allocated. */
2501 clear_modify_mem_tables ();
2502 /* Some working arrays used to track first and last set in each block. */
2503 reg_avail_info = (struct reg_avail_info*)
2504 gmalloc (max_gcse_regno * sizeof (struct reg_avail_info));
2505
2506 for (i = 0; i < max_gcse_regno; ++i)
2507 reg_avail_info[i].last_bb = NULL;
2508
2509 FOR_EACH_BB (current_bb)
2510 {
2511 rtx insn;
2512 unsigned int regno;
2513 int in_libcall_block;
2514
2515 /* First pass over the instructions records information used to
2516 determine when registers and memory are first and last set.
2517 ??? hard-reg reg_set_in_block computation
2518 could be moved to compute_sets since they currently don't change. */
2519
2520 for (insn = current_bb->head;
2521 insn && insn != NEXT_INSN (current_bb->end);
2522 insn = NEXT_INSN (insn))
2523 {
2524 if (! INSN_P (insn))
2525 continue;
2526
2527 if (GET_CODE (insn) == CALL_INSN)
2528 {
2529 bool clobbers_all = false;
2530 #ifdef NON_SAVING_SETJMP
2531 if (NON_SAVING_SETJMP
2532 && find_reg_note (insn, REG_SETJMP, NULL_RTX))
2533 clobbers_all = true;
2534 #endif
2535
2536 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2537 if (clobbers_all
2538 || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2539 record_last_reg_set_info (insn, regno);
2540
2541 mark_call (insn);
2542 }
2543
2544 note_stores (PATTERN (insn), record_last_set_info, insn);
2545 }
2546
2547 /* The next pass builds the hash table. */
2548
2549 for (insn = current_bb->head, in_libcall_block = 0;
2550 insn && insn != NEXT_INSN (current_bb->end);
2551 insn = NEXT_INSN (insn))
2552 if (INSN_P (insn))
2553 {
2554 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
2555 in_libcall_block = 1;
2556 else if (set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2557 in_libcall_block = 0;
2558 hash_scan_insn (insn, set_p, in_libcall_block);
2559 if (!set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2560 in_libcall_block = 0;
2561 }
2562 }
2563
2564 free (reg_avail_info);
2565 reg_avail_info = NULL;
2566 }
2567
2568 /* Allocate space for the set hash table.
2569 N_INSNS is the number of instructions in the function.
2570 It is used to determine the number of buckets to use. */
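
/* For example, a 1000-insn function gets 1000 / 4 = 250 buckets, bumped
   to the odd number 251 below.  */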
2571
2572 static void
2573 alloc_set_hash_table (n_insns)
2574 int n_insns;
2575 {
2576 int n;
2577
2578 set_hash_table_size = n_insns / 4;
2579 if (set_hash_table_size < 11)
2580 set_hash_table_size = 11;
2581
2582 /* Attempt to maintain efficient use of hash table.
2583 Making it an odd number is simplest for now.
2584 ??? Later take some measurements. */
2585 set_hash_table_size |= 1;
2586 n = set_hash_table_size * sizeof (struct expr *);
2587 set_hash_table = (struct expr **) gmalloc (n);
2588 }
2589
2590 /* Free things allocated by alloc_set_hash_table. */
2591
2592 static void
2593 free_set_hash_table ()
2594 {
2595 free (set_hash_table);
2596 }
2597
2598 /* Compute the hash table for doing copy/const propagation. */
2599
2600 static void
2601 compute_set_hash_table ()
2602 {
2603 /* Initialize count of number of entries in hash table. */
2604 n_sets = 0;
2605 memset ((char *) set_hash_table, 0,
2606 set_hash_table_size * sizeof (struct expr *));
2607
2608 compute_hash_table (1);
2609 }
2610
2611 /* Allocate space for the expression hash table.
2612 N_INSNS is the number of instructions in the function.
2613 It is used to determine the number of buckets to use. */
2614
2615 static void
2616 alloc_expr_hash_table (n_insns)
2617 unsigned int n_insns;
2618 {
2619 int n;
2620
2621 expr_hash_table_size = n_insns / 2;
2622 /* Make sure the amount is usable. */
2623 if (expr_hash_table_size < 11)
2624 expr_hash_table_size = 11;
2625
2626 /* Attempt to maintain efficient use of hash table.
2627 Making it an odd number is simplest for now.
2628 ??? Later take some measurements. */
2629 expr_hash_table_size |= 1;
2630 n = expr_hash_table_size * sizeof (struct expr *);
2631 expr_hash_table = (struct expr **) gmalloc (n);
2632 }
2633
2634 /* Free things allocated by alloc_expr_hash_table. */
2635
2636 static void
2637 free_expr_hash_table ()
2638 {
2639 free (expr_hash_table);
2640 }
2641
2642 /* Compute the hash table for doing GCSE. */
2643
2644 static void
2645 compute_expr_hash_table ()
2646 {
2647 /* Initialize count of number of entries in hash table. */
2648 n_exprs = 0;
2649 memset ((char *) expr_hash_table, 0,
2650 expr_hash_table_size * sizeof (struct expr *));
2651
2652 compute_hash_table (0);
2653 }
2654 \f
2655 /* Expression tracking support. */
2656
2657 /* Lookup pattern PAT in the expression table.
2658 The result is a pointer to the table entry, or NULL if not found. */
2659
2660 static struct expr *
2661 lookup_expr (pat)
2662 rtx pat;
2663 {
2664 int do_not_record_p;
2665 unsigned int hash = hash_expr (pat, GET_MODE (pat), &do_not_record_p,
2666 expr_hash_table_size);
2667 struct expr *expr;
2668
2669 if (do_not_record_p)
2670 return NULL;
2671
2672 expr = expr_hash_table[hash];
2673
2674 while (expr && ! expr_equiv_p (expr->expr, pat))
2675 expr = expr->next_same_hash;
2676
2677 return expr;
2678 }
2679
2680 /* Lookup REGNO in the set table. If PAT is non-NULL look for the entry that
2681 matches it, otherwise return the first entry for REGNO. The result is a
2682 pointer to the table entry, or NULL if not found. */
2683
2684 static struct expr *
2685 lookup_set (regno, pat)
2686 unsigned int regno;
2687 rtx pat;
2688 {
2689 unsigned int hash = hash_set (regno, set_hash_table_size);
2690 struct expr *expr;
2691
2692 expr = set_hash_table[hash];
2693
2694 if (pat)
2695 {
2696 while (expr && ! expr_equiv_p (expr->expr, pat))
2697 expr = expr->next_same_hash;
2698 }
2699 else
2700 {
2701 while (expr && REGNO (SET_DEST (expr->expr)) != regno)
2702 expr = expr->next_same_hash;
2703 }
2704
2705 return expr;
2706 }
2707
2708 /* Return the next entry for REGNO in list EXPR. */
2709
2710 static struct expr *
2711 next_set (regno, expr)
2712 unsigned int regno;
2713 struct expr *expr;
2714 {
2715 do
2716 expr = expr->next_same_hash;
2717 while (expr && REGNO (SET_DEST (expr->expr)) != regno);
2718
2719 return expr;
2720 }
2721
2722 /* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
2723 types may be mixed. */
2724
2725 static void
2726 free_insn_expr_list_list (listp)
2727 rtx *listp;
2728 {
2729 rtx list, next;
2730
2731 for (list = *listp; list ; list = next)
2732 {
2733 next = XEXP (list, 1);
2734 if (GET_CODE (list) == EXPR_LIST)
2735 free_EXPR_LIST_node (list);
2736 else
2737 free_INSN_LIST_node (list);
2738 }
2739
2740 *listp = NULL;
2741 }
2742
2743 /* Clear canon_modify_mem_list and modify_mem_list tables. */
2744 static void
2745 clear_modify_mem_tables ()
2746 {
2747 int i;
2748
2749 EXECUTE_IF_SET_IN_BITMAP
2750 (modify_mem_list_set, 0, i, free_INSN_LIST_list (modify_mem_list + i));
2751 bitmap_clear (modify_mem_list_set);
2752
2753 EXECUTE_IF_SET_IN_BITMAP
2754 (canon_modify_mem_list_set, 0, i,
2755 free_insn_expr_list_list (canon_modify_mem_list + i));
2756 bitmap_clear (canon_modify_mem_list_set);
2757 }
2758
2759 /* Release memory used by modify_mem_list_set and canon_modify_mem_list_set. */
2760
2761 static void
2762 free_modify_mem_tables ()
2763 {
2764 clear_modify_mem_tables ();
2765 free (modify_mem_list);
2766 free (canon_modify_mem_list);
2767 modify_mem_list = 0;
2768 canon_modify_mem_list = 0;
2769 }
2770
2771 /* Reset tables used to keep track of what's still available [since the
2772 start of the block]. */
2773
2774 static void
2775 reset_opr_set_tables ()
2776 {
2777 /* Maintain a bitmap of which regs have been set since beginning of
2778 the block. */
2779 CLEAR_REG_SET (reg_set_bitmap);
2780
2781 /* Also keep a record of the last instruction to modify memory.
2782 For now this is very trivial; we only record whether any memory
2783 location has been modified. */
2784 clear_modify_mem_tables ();
2785 }
2786
2787 /* Return non-zero if the operands of X are not set before INSN in
2788 INSN's basic block. */
2789
2790 static int
2791 oprs_not_set_p (x, insn)
2792 rtx x, insn;
2793 {
2794 int i, j;
2795 enum rtx_code code;
2796 const char *fmt;
2797
2798 if (x == 0)
2799 return 1;
2800
2801 code = GET_CODE (x);
2802 switch (code)
2803 {
2804 case PC:
2805 case CC0:
2806 case CONST:
2807 case CONST_INT:
2808 case CONST_DOUBLE:
2809 case CONST_VECTOR:
2810 case SYMBOL_REF:
2811 case LABEL_REF:
2812 case ADDR_VEC:
2813 case ADDR_DIFF_VEC:
2814 return 1;
2815
2816 case MEM:
2817 if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
2818 INSN_CUID (insn), x, 0))
2819 return 0;
2820 else
2821 return oprs_not_set_p (XEXP (x, 0), insn);
2822
2823 case REG:
2824 return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));
2825
2826 default:
2827 break;
2828 }
2829
2830 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2831 {
2832 if (fmt[i] == 'e')
2833 {
2834 /* If we are about to do the last recursive call
2835 needed at this level, change it into iteration.
2836 This function is called enough to be worth it. */
2837 if (i == 0)
2838 return oprs_not_set_p (XEXP (x, i), insn);
2839
2840 if (! oprs_not_set_p (XEXP (x, i), insn))
2841 return 0;
2842 }
2843 else if (fmt[i] == 'E')
2844 for (j = 0; j < XVECLEN (x, i); j++)
2845 if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
2846 return 0;
2847 }
2848
2849 return 1;
2850 }
2851
2852 /* Mark things set by a CALL. */
2853
2854 static void
2855 mark_call (insn)
2856 rtx insn;
2857 {
2858 if (! CONST_OR_PURE_CALL_P (insn))
2859 record_last_mem_set_info (insn);
2860 }
2861
2862 /* Mark things set by a SET. */
2863
2864 static void
2865 mark_set (pat, insn)
2866 rtx pat, insn;
2867 {
2868 rtx dest = SET_DEST (pat);
2869
2870 while (GET_CODE (dest) == SUBREG
2871 || GET_CODE (dest) == ZERO_EXTRACT
2872 || GET_CODE (dest) == SIGN_EXTRACT
2873 || GET_CODE (dest) == STRICT_LOW_PART)
2874 dest = XEXP (dest, 0);
2875
2876 if (GET_CODE (dest) == REG)
2877 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
2878 else if (GET_CODE (dest) == MEM)
2879 record_last_mem_set_info (insn);
2880
2881 if (GET_CODE (SET_SRC (pat)) == CALL)
2882 mark_call (insn);
2883 }
2884
2885 /* Record things set by a CLOBBER. */
2886
2887 static void
2888 mark_clobber (pat, insn)
2889 rtx pat, insn;
2890 {
2891 rtx clob = XEXP (pat, 0);
2892
2893 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
2894 clob = XEXP (clob, 0);
2895
2896 if (GET_CODE (clob) == REG)
2897 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
2898 else
2899 record_last_mem_set_info (insn);
2900 }
2901
2902 /* Record things set by INSN.
2903 This data is used by oprs_not_set_p. */
2904
2905 static void
2906 mark_oprs_set (insn)
2907 rtx insn;
2908 {
2909 rtx pat = PATTERN (insn);
2910 int i;
2911
2912 if (GET_CODE (pat) == SET)
2913 mark_set (pat, insn);
2914 else if (GET_CODE (pat) == PARALLEL)
2915 for (i = 0; i < XVECLEN (pat, 0); i++)
2916 {
2917 rtx x = XVECEXP (pat, 0, i);
2918
2919 if (GET_CODE (x) == SET)
2920 mark_set (x, insn);
2921 else if (GET_CODE (x) == CLOBBER)
2922 mark_clobber (x, insn);
2923 else if (GET_CODE (x) == CALL)
2924 mark_call (insn);
2925 }
2926
2927 else if (GET_CODE (pat) == CLOBBER)
2928 mark_clobber (pat, insn);
2929 else if (GET_CODE (pat) == CALL)
2930 mark_call (insn);
2931 }
2932
2933 \f
2934 /* Classic GCSE reaching definition support. */
2935
2936 /* Allocate reaching def variables. */
2937
2938 static void
2939 alloc_rd_mem (n_blocks, n_insns)
2940 int n_blocks, n_insns;
2941 {
2942 rd_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2943 sbitmap_vector_zero (rd_kill, n_blocks);
2944
2945 rd_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2946 sbitmap_vector_zero (rd_gen, n_blocks);
2947
2948 reaching_defs = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2949 sbitmap_vector_zero (reaching_defs, n_blocks);
2950
2951 rd_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2952 sbitmap_vector_zero (rd_out, n_blocks);
2953 }
2954
2955 /* Free reaching def variables. */
2956
2957 static void
2958 free_rd_mem ()
2959 {
2960 sbitmap_vector_free (rd_kill);
2961 sbitmap_vector_free (rd_gen);
2962 sbitmap_vector_free (reaching_defs);
2963 sbitmap_vector_free (rd_out);
2964 }
2965
2966 /* INSN sets REGNO in BB; mark the settings of REGNO outside this block as killed in BB. */
2967
2968 static void
2969 handle_rd_kill_set (insn, regno, bb)
2970 rtx insn;
2971 int regno;
2972 basic_block bb;
2973 {
2974 struct reg_set *this_reg;
2975
2976 for (this_reg = reg_set_table[regno]; this_reg; this_reg = this_reg ->next)
2977 if (BLOCK_NUM (this_reg->insn) != BLOCK_NUM (insn))
2978 SET_BIT (rd_kill[bb->index], INSN_CUID (this_reg->insn));
2979 }
2980
2981 /* Compute the set of kill's for reaching definitions. */
2982
2983 static void
2984 compute_kill_rd ()
2985 {
2986 int cuid;
2987 unsigned int regno;
2988 int i;
2989 basic_block bb;
2990
2991 /* For each block
2992 For each set bit in `gen' of the block (i.e. each insn that
2993 generates a definition in the block)
2994 Let REGX be the reg set by that insn
2995 Look at the linked list starting at reg_set_table[REGX]
2996 For each setting of REGX in the linked list that is not in
2997 this block
2998 Set the bit in `kill' corresponding to that setting's insn. */
2999 FOR_EACH_BB (bb)
3000 for (cuid = 0; cuid < max_cuid; cuid++)
3001 if (TEST_BIT (rd_gen[bb->index], cuid))
3002 {
3003 rtx insn = CUID_INSN (cuid);
3004 rtx pat = PATTERN (insn);
3005
3006 if (GET_CODE (insn) == CALL_INSN)
3007 {
3008 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
3009 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
3010 handle_rd_kill_set (insn, regno, bb);
3011 }
3012
3013 if (GET_CODE (pat) == PARALLEL)
3014 {
3015 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
3016 {
3017 enum rtx_code code = GET_CODE (XVECEXP (pat, 0, i));
3018
3019 if ((code == SET || code == CLOBBER)
3020 && GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG)
3021 handle_rd_kill_set (insn,
3022 REGNO (XEXP (XVECEXP (pat, 0, i), 0)),
3023 bb);
3024 }
3025 }
3026 else if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == REG)
3027 /* Each setting of this register outside of this block
3028 must be marked in the set of kills in this block. */
3029 handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), bb);
3030 }
3031 }
3032
3033 /* Compute the reaching definitions as in
3034 Compilers Principles, Techniques, and Tools. Aho, Sethi, Ullman,
3035 Chapter 10. It is the same algorithm as used for computing available
3036 expressions but applied to the gens and kills of reaching definitions. */
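
/* Concretely, the fixed point computed below is

     reaching_defs[b] = UNION over predecessors p of rd_out[p]
     rd_out[b]        = rd_gen[b] | (reaching_defs[b] & ~rd_kill[b])

   iterated until no rd_out changes.  */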
3037
3038 static void
3039 compute_rd ()
3040 {
3041 int changed, passes;
3042 basic_block bb;
3043
3044 FOR_EACH_BB (bb)
3045 sbitmap_copy (rd_out[bb->index] /*dst*/, rd_gen[bb->index] /*src*/);
3046
3047 passes = 0;
3048 changed = 1;
3049 while (changed)
3050 {
3051 changed = 0;
3052 FOR_EACH_BB (bb)
3053 {
3054 sbitmap_union_of_preds (reaching_defs[bb->index], rd_out, bb->index);
3055 changed |= sbitmap_union_of_diff_cg (rd_out[bb->index], rd_gen[bb->index],
3056 reaching_defs[bb->index], rd_kill[bb->index]);
3057 }
3058 passes++;
3059 }
3060
3061 if (gcse_file)
3062 fprintf (gcse_file, "reaching def computation: %d passes\n", passes);
3063 }
3064 \f
3065 /* Classic GCSE available expression support. */
3066
3067 /* Allocate memory for available expression computation. */
3068
3069 static void
3070 alloc_avail_expr_mem (n_blocks, n_exprs)
3071 int n_blocks, n_exprs;
3072 {
3073 ae_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3074 sbitmap_vector_zero (ae_kill, n_blocks);
3075
3076 ae_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3077 sbitmap_vector_zero (ae_gen, n_blocks);
3078
3079 ae_in = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3080 sbitmap_vector_zero (ae_in, n_blocks);
3081
3082 ae_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3083 sbitmap_vector_zero (ae_out, n_blocks);
3084 }
3085
3086 static void
3087 free_avail_expr_mem ()
3088 {
3089 sbitmap_vector_free (ae_kill);
3090 sbitmap_vector_free (ae_gen);
3091 sbitmap_vector_free (ae_in);
3092 sbitmap_vector_free (ae_out);
3093 }
3094
3095 /* Compute the set of available expressions generated in each basic block. */
3096
3097 static void
3098 compute_ae_gen ()
3099 {
3100 unsigned int i;
3101 struct expr *expr;
3102 struct occr *occr;
3103
3104 /* For each recorded occurrence of each expression, set ae_gen[bb][expr].
3105 This is all we have to do because an expression is not recorded if it
3106 is not available, and the only expressions we want to work with are the
3107 ones that are recorded. */
3108 for (i = 0; i < expr_hash_table_size; i++)
3109 for (expr = expr_hash_table[i]; expr != 0; expr = expr->next_same_hash)
3110 for (occr = expr->avail_occr; occr != 0; occr = occr->next)
3111 SET_BIT (ae_gen[BLOCK_NUM (occr->insn)], expr->bitmap_index);
3112 }
3113
3114 /* Return non-zero if expression X is killed in BB. */
3115
3116 static int
3117 expr_killed_p (x, bb)
3118 rtx x;
3119 basic_block bb;
3120 {
3121 int i, j;
3122 enum rtx_code code;
3123 const char *fmt;
3124
3125 if (x == 0)
3126 return 1;
3127
3128 code = GET_CODE (x);
3129 switch (code)
3130 {
3131 case REG:
3132 return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
3133
3134 case MEM:
3135 if (load_killed_in_block_p (bb, get_max_uid () + 1, x, 0))
3136 return 1;
3137 else
3138 return expr_killed_p (XEXP (x, 0), bb);
3139
3140 case PC:
3141 case CC0: /*FIXME*/
3142 case CONST:
3143 case CONST_INT:
3144 case CONST_DOUBLE:
3145 case CONST_VECTOR:
3146 case SYMBOL_REF:
3147 case LABEL_REF:
3148 case ADDR_VEC:
3149 case ADDR_DIFF_VEC:
3150 return 0;
3151
3152 default:
3153 break;
3154 }
3155
3156 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3157 {
3158 if (fmt[i] == 'e')
3159 {
3160 /* If we are about to do the last recursive call
3161 needed at this level, change it into iteration.
3162 This function is called enough to be worth it. */
3163 if (i == 0)
3164 return expr_killed_p (XEXP (x, i), bb);
3165 else if (expr_killed_p (XEXP (x, i), bb))
3166 return 1;
3167 }
3168 else if (fmt[i] == 'E')
3169 for (j = 0; j < XVECLEN (x, i); j++)
3170 if (expr_killed_p (XVECEXP (x, i, j), bb))
3171 return 1;
3172 }
3173
3174 return 0;
3175 }
3176
3177 /* Compute the set of available expressions killed in each basic block. */
3178
3179 static void
3180 compute_ae_kill (ae_gen, ae_kill)
3181 sbitmap *ae_gen, *ae_kill;
3182 {
3183 basic_block bb;
3184 unsigned int i;
3185 struct expr *expr;
3186
3187 FOR_EACH_BB (bb)
3188 for (i = 0; i < expr_hash_table_size; i++)
3189 for (expr = expr_hash_table[i]; expr; expr = expr->next_same_hash)
3190 {
3191 /* Skip EXPR if generated in this block. */
3192 if (TEST_BIT (ae_gen[bb->index], expr->bitmap_index))
3193 continue;
3194
3195 if (expr_killed_p (expr->expr, bb))
3196 SET_BIT (ae_kill[bb->index], expr->bitmap_index);
3197 }
3198 }
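
/* Given AE_GEN and AE_KILL, the solver (compute_available, elsewhere in
   this file) iterates the standard forward must-dataflow equations

     ae_in[b]  = INTERSECTION over predecessors p of ae_out[p]
     ae_out[b] = ae_gen[b] | (ae_in[b] & ~ae_kill[b])

   This is a sketch of the equations only; the solver itself is not in
   this excerpt.  */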
3199 \f
3200 /* Actually perform the Classic GCSE optimizations. */
3201
3202 /* Return non-zero if occurrence OCCR of expression EXPR reaches block BB.
3203
3204 CHECK_SELF_LOOP is non-zero if we should consider a block reaching itself
3205 as a positive reach. We want to do this when there are two computations
3206 of the expression in the block.
3207
3208 VISITED is a pointer to a working buffer for tracking which BB's have
3209 been visited; the expr_reaches_here_p wrapper allocates and frees it.
3210
3211 We treat reaching expressions that go through blocks containing the same
3212 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3213 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3214 2 as not reaching. The intent is to improve the probability of finding
3215 only one reaching expression and to reduce register lifetimes by picking
3216 the closest such expression. */
3217
3218 static int
3219 expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited)
3220 struct occr *occr;
3221 struct expr *expr;
3222 basic_block bb;
3223 int check_self_loop;
3224 char *visited;
3225 {
3226 edge pred;
3227
3228 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
3229 {
3230 basic_block pred_bb = pred->src;
3231
3232 if (visited[pred_bb->index])
3233 /* This predecessor has already been visited. Nothing to do. */
3234 ;
3235 else if (pred_bb == bb)
3236 {
3237 /* BB loops on itself. */
3238 if (check_self_loop
3239 && TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index)
3240 && BLOCK_NUM (occr->insn) == pred_bb->index)
3241 return 1;
3242
3243 visited[pred_bb->index] = 1;
3244 }
3245
3246 /* Ignore this predecessor if it kills the expression. */
3247 else if (TEST_BIT (ae_kill[pred_bb->index], expr->bitmap_index))
3248 visited[pred_bb->index] = 1;
3249
3250 /* Does this predecessor generate this expression? */
3251 else if (TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index))
3252 {
3253 /* Is this the occurrence we're looking for?
3254 Note that there's only one generating occurrence per block
3255 so we just need to check the block number. */
3256 if (BLOCK_NUM (occr->insn) == pred_bb->index)
3257 return 1;
3258
3259 visited[pred_bb->index] = 1;
3260 }
3261
3262 /* Neither gen nor kill. */
3263 else
3264 {
3265 visited[pred_bb->index] = 1;
3266 if (expr_reaches_here_p_work (occr, expr, pred_bb, check_self_loop,
3267 visited))
3268
3269 return 1;
3270 }
3271 }
3272
3273 /* All paths have been checked. */
3274 return 0;
3275 }
3276
3277 /* This wrapper for expr_reaches_here_p_work() is to ensure that any
3278 memory allocated for that function is returned. */
3279
3280 static int
3281 expr_reaches_here_p (occr, expr, bb, check_self_loop)
3282 struct occr *occr;
3283 struct expr *expr;
3284 basic_block bb;
3285 int check_self_loop;
3286 {
3287 int rval;
3288 char *visited = (char *) xcalloc (last_basic_block, 1);
3289
3290 rval = expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited);
3291
3292 free (visited);
3293 return rval;
3294 }
3295
3296 /* Return the instruction computing EXPR that reaches INSN's basic block.
3297 If there is more than one such instruction, return NULL.
3298
3299 Called only by handle_avail_expr. */
3300
3301 static rtx
3302 computing_insn (expr, insn)
3303 struct expr *expr;
3304 rtx insn;
3305 {
3306 basic_block bb = BLOCK_FOR_INSN (insn);
3307
3308 if (expr->avail_occr->next == NULL)
3309 {
3310 if (BLOCK_FOR_INSN (expr->avail_occr->insn) == bb)
3311 /* The only available occurrence is this insn itself
3312 (i.e. a loop in the flow graph), so do nothing. */
3313 return NULL;
3314
3315 /* (FIXME) Case where we found a pattern that was created by
3316 an earlier substitution. */
3317 return expr->avail_occr->insn;
3318 }
3319 else
3320 {
3321 /* Pattern is computed more than once.
3322 Search backwards from this insn to see how many of these
3323 computations actually reach this insn. */
3324 struct occr *occr;
3325 rtx insn_computes_expr = NULL;
3326 int can_reach = 0;
3327
3328 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
3329 {
3330 if (BLOCK_FOR_INSN (occr->insn) == bb)
3331 {
3332 /* The expression is generated in this block.
3333 The only time we care about this is when the expression
3334 is generated later in the block [and thus there's a loop].
3335 We let the normal cse pass handle the other cases. */
3336 if (INSN_CUID (insn) < INSN_CUID (occr->insn)
3337 && expr_reaches_here_p (occr, expr, bb, 1))
3338 {
3339 can_reach++;
3340 if (can_reach > 1)
3341 return NULL;
3342
3343 insn_computes_expr = occr->insn;
3344 }
3345 }
3346 else if (expr_reaches_here_p (occr, expr, bb, 0))
3347 {
3348 can_reach++;
3349 if (can_reach > 1)
3350 return NULL;
3351
3352 insn_computes_expr = occr->insn;
3353 }
3354 }
3355
3356 if (insn_computes_expr == NULL)
3357 abort ();
3358
3359 return insn_computes_expr;
3360 }
3361 }
3362
3363 /* Return non-zero if the definition in DEF_INSN can reach INSN.
3364 Only called by can_disregard_other_sets. */
3365
3366 static int
3367 def_reaches_here_p (insn, def_insn)
3368 rtx insn, def_insn;
3369 {
3370 rtx reg;
3371
3372 if (TEST_BIT (reaching_defs[BLOCK_NUM (insn)], INSN_CUID (def_insn)))
3373 return 1;
3374
3375 if (BLOCK_NUM (insn) == BLOCK_NUM (def_insn))
3376 {
3377 if (INSN_CUID (def_insn) < INSN_CUID (insn))
3378 {
3379 if (GET_CODE (PATTERN (def_insn)) == PARALLEL)
3380 return 1;
3381 else if (GET_CODE (PATTERN (def_insn)) == CLOBBER)
3382 reg = XEXP (PATTERN (def_insn), 0);
3383 else if (GET_CODE (PATTERN (def_insn)) == SET)
3384 reg = SET_DEST (PATTERN (def_insn));
3385 else
3386 abort ();
3387
3388 return ! reg_set_between_p (reg, NEXT_INSN (def_insn), insn);
3389 }
3390 else
3391 return 0;
3392 }
3393
3394 return 0;
3395 }
3396
3397 /* Return non-zero if *ADDR_THIS_REG can only have one value at INSN. The
3398 value returned is the number of definitions that reach INSN. Returning a
3399 value of zero means that more than one definition may reach INSN, so
3400 the caller can't perform whatever optimization it is attempting; i.e. it
3401 is always safe to return zero. */
3402
3403 static int
3404 can_disregard_other_sets (addr_this_reg, insn, for_combine)
3405 struct reg_set **addr_this_reg;
3406 rtx insn;
3407 int for_combine;
3408 {
3409 int number_of_reaching_defs = 0;
3410 struct reg_set *this_reg;
3411
3412 for (this_reg = *addr_this_reg; this_reg != 0; this_reg = this_reg->next)
3413 if (def_reaches_here_p (insn, this_reg->insn))
3414 {
3415 number_of_reaching_defs++;
3416 /* Ignore parallels for now. */
3417 if (GET_CODE (PATTERN (this_reg->insn)) == PARALLEL)
3418 return 0;
3419
3420 if (!for_combine
3421 && (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER
3422 || ! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3423 SET_SRC (PATTERN (insn)))))
3424 /* A setting of the reg to a different value reaches INSN. */
3425 return 0;
3426
3427 if (number_of_reaching_defs > 1)
3428 {
3429 /* If in this setting the value the register is being set to is
3430 equal to the previous value the register was set to and this
3431 setting reaches the insn we are trying to do the substitution
3432 on then we are ok. */
3433 if (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER)
3434 return 0;
3435 else if (! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3436 SET_SRC (PATTERN (insn))))
3437 return 0;
3438 }
3439
3440 *addr_this_reg = this_reg;
3441 }
3442
3443 return number_of_reaching_defs;
3444 }
3445
3446 /* Expression computed by insn is available and the substitution is legal,
3447 so try to perform the substitution.
3448
3449 The result is non-zero if any changes were made. */
3450
3451 static int
3452 handle_avail_expr (insn, expr)
3453 rtx insn;
3454 struct expr *expr;
3455 {
3456 rtx pat, insn_computes_expr, expr_set;
3457 rtx to;
3458 struct reg_set *this_reg;
3459 int found_setting, use_src;
3460 int changed = 0;
3461
3462 /* We only handle the case where one computation of the expression
3463 reaches this instruction. */
3464 insn_computes_expr = computing_insn (expr, insn);
3465 if (insn_computes_expr == NULL)
3466 return 0;
3467 expr_set = single_set (insn_computes_expr);
3468 if (!expr_set)
3469 abort ();
3470
3471 found_setting = 0;
3472 use_src = 0;
3473
3474 /* At this point we know only one computation of EXPR outside of this
3475 block reaches this insn. Now try to find a register that the
3476 expression is computed into. */
3477 if (GET_CODE (SET_SRC (expr_set)) == REG)
3478 {
3479 /* This is the case when the available expression that reaches
3480 here has already been handled as an available expression. */
3481 unsigned int regnum_for_replacing
3482 = REGNO (SET_SRC (expr_set));
3483
3484 /* If the register was created by GCSE we can't use `reg_set_table',
3485 however we know it's set only once. */
3486 if (regnum_for_replacing >= max_gcse_regno
3487 /* If the register the expression is computed into is set only once,
3488 or only one set reaches this insn, we can use it. */
3489 || (((this_reg = reg_set_table[regnum_for_replacing]),
3490 this_reg->next == NULL)
3491 || can_disregard_other_sets (&this_reg, insn, 0)))
3492 {
3493 use_src = 1;
3494 found_setting = 1;
3495 }
3496 }
3497
3498 if (!found_setting)
3499 {
3500 unsigned int regnum_for_replacing
3501 = REGNO (SET_DEST (expr_set));
3502
3503 /* This shouldn't happen. */
3504 if (regnum_for_replacing >= max_gcse_regno)
3505 abort ();
3506
3507 this_reg = reg_set_table[regnum_for_replacing];
3508
3509 /* If the register the expression is computed into is set only once,
3510 or only one set reaches this insn, use it. */
3511 if (this_reg->next == NULL
3512 || can_disregard_other_sets (&this_reg, insn, 0))
3513 found_setting = 1;
3514 }
3515
3516 if (found_setting)
3517 {
3518 pat = PATTERN (insn);
3519 if (use_src)
3520 to = SET_SRC (expr_set);
3521 else
3522 to = SET_DEST (expr_set);
3523 changed = validate_change (insn, &SET_SRC (pat), to, 0);
3524
3525 /* We should be able to ignore the return code from validate_change but
3526 to play it safe we check. */
3527 if (changed)
3528 {
3529 gcse_subst_count++;
3530 if (gcse_file != NULL)
3531 {
3532 fprintf (gcse_file, "GCSE: Replacing the source in insn %d with",
3533 INSN_UID (insn));
3534 fprintf (gcse_file, " reg %d %s insn %d\n",
3535 REGNO (to), use_src ? "from" : "set in",
3536 INSN_UID (insn_computes_expr));
3537 }
3538 }
3539 }
3540
3541 /* The register that the expr is computed into is set more than once. */
3542 else if (1 /*expensive_op(this_pattrn->op) && do_expensive_gcse)*/)
3543 {
3544 /* Insert an insn after INSN_COMPUTES_EXPR that copies the reg it
3545 sets into a new pseudo register; call this new register REGN.
3546 From there until the end of the basic block, or until the old
3547 register is set again, uses of it can be replaced with REGN. */
3548 rtx new_insn;
3549
3550 to = gen_reg_rtx (GET_MODE (SET_DEST (expr_set)));
3551
3552 /* Generate the new insn. */
3553 /* ??? If the change fails, we return 0, even though we created
3554 an insn. I think this is ok. */
3555 new_insn
3556 = emit_insn_after (gen_rtx_SET (VOIDmode, to,
3557 SET_DEST (expr_set)),
3558 insn_computes_expr);
3559
3560 /* Keep register set table up to date. */
3561 record_one_set (REGNO (to), new_insn);
3562
3563 gcse_create_count++;
3564 if (gcse_file != NULL)
3565 {
3566 fprintf (gcse_file, "GCSE: Creating insn %d to copy value of reg %d",
3567 INSN_UID (new_insn),
3568 REGNO (SET_DEST (expr_set)));
3569 fprintf (gcse_file, ", computed in insn %d,\n",
3570 INSN_UID (insn_computes_expr));
3571 fprintf (gcse_file, " into newly allocated reg %d\n",
3572 REGNO (to));
3573 }
3574
3575 pat = PATTERN (insn);
3576
3577 /* Do register replacement for INSN. */
3578 changed = validate_change (insn, &SET_SRC (pat), to, 0);
3582
3583 /* We should be able to ignore the return code from validate_change but
3584 to play it safe we check. */
3585 if (changed)
3586 {
3587 gcse_subst_count++;
3588 if (gcse_file != NULL)
3589 {
3590 fprintf (gcse_file,
3591 "GCSE: Replacing the source in insn %d with reg %d ",
3592 INSN_UID (insn),
3593 REGNO (to));
3595 fprintf (gcse_file, "set in insn %d\n",
3596 INSN_UID (insn_computes_expr));
3597 }
3598 }
3599 }
3600
3601 return changed;
3602 }
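
/* Illustrative sketch of the transformation above (added commentary;
   insn and register numbers are made up). Given

     insn 10: (set (reg 100) (plus (reg 90) (reg 91)))   ; computes EXPR
     ...
     insn 20: (set (reg 105) (plus (reg 90) (reg 91)))   ; EXPR available

   if the single set of reg 100 reaches insn 20, it is rewritten as

     insn 20: (set (reg 105) (reg 100))

   and if reg 100 is set more than once, a fresh pseudo is copied from
   it right after insn 10 and used as the source instead. */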
3603
3604 /* Perform classic GCSE. This is called by one_classic_gcse_pass after all
3605 the dataflow analysis has been done.
3606
3607 The result is non-zero if a change was made. */
3608
3609 static int
3610 classic_gcse ()
3611 {
3612 int changed;
3613 rtx insn;
3614 basic_block bb;
3615
3616 /* Note we start at block 1. */
3617
3618 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3619 return 0;
3620
3621 changed = 0;
3622 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
3623 {
3624 /* Reset tables used to keep track of what's still valid [since the
3625 start of the block]. */
3626 reset_opr_set_tables ();
3627
3628 for (insn = bb->head;
3629 insn != NULL && insn != NEXT_INSN (bb->end);
3630 insn = NEXT_INSN (insn))
3631 {
3632 /* Is insn of form (set (pseudo-reg) ...)? */
3633 if (GET_CODE (insn) == INSN
3634 && GET_CODE (PATTERN (insn)) == SET
3635 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
3636 && REGNO (SET_DEST (PATTERN (insn))) >= FIRST_PSEUDO_REGISTER)
3637 {
3638 rtx pat = PATTERN (insn);
3639 rtx src = SET_SRC (pat);
3640 struct expr *expr;
3641
3642 if (want_to_gcse_p (src)
3643 /* Is the expression recorded? */
3644 && ((expr = lookup_expr (src)) != NULL)
3645 /* Is the expression available [at the start of the
3646 block]? */
3647 && TEST_BIT (ae_in[bb->index], expr->bitmap_index)
3648 /* Are the operands unchanged since the start of the
3649 block? */
3650 && oprs_not_set_p (src, insn))
3651 changed |= handle_avail_expr (insn, expr);
3652 }
3653
3654 /* Keep track of everything modified by this insn. */
3655 /* ??? Need to be careful w.r.t. mods done to INSN. */
3656 if (INSN_P (insn))
3657 mark_oprs_set (insn);
3658 }
3659 }
3660
3661 return changed;
3662 }
3663
3664 /* Top level routine to perform one classic GCSE pass.
3665
3666 Return non-zero if a change was made. */
3667
3668 static int
3669 one_classic_gcse_pass (pass)
3670 int pass;
3671 {
3672 int changed = 0;
3673
3674 gcse_subst_count = 0;
3675 gcse_create_count = 0;
3676
3677 alloc_expr_hash_table (max_cuid);
3678 alloc_rd_mem (last_basic_block, max_cuid);
3679 compute_expr_hash_table ();
3680 if (gcse_file)
3681 dump_hash_table (gcse_file, "Expression", expr_hash_table,
3682 expr_hash_table_size, n_exprs);
3683
3684 if (n_exprs > 0)
3685 {
3686 compute_kill_rd ();
3687 compute_rd ();
3688 alloc_avail_expr_mem (last_basic_block, n_exprs);
3689 compute_ae_gen ();
3690 compute_ae_kill (ae_gen, ae_kill);
3691 compute_available (ae_gen, ae_kill, ae_out, ae_in);
3692 changed = classic_gcse ();
3693 free_avail_expr_mem ();
3694 }
3695
3696 free_rd_mem ();
3697 free_expr_hash_table ();
3698
3699 if (gcse_file)
3700 {
3701 fprintf (gcse_file, "\n");
3702 fprintf (gcse_file, "GCSE of %s, pass %d: %d bytes needed, %d substs,",
3703 current_function_name, pass, bytes_used, gcse_subst_count);
3704 fprintf (gcse_file, "%d insns created\n", gcse_create_count);
3705 }
3706
3707 return changed;
3708 }
3709 \f
3710 /* Compute copy/constant propagation working variables. */
3711
3712 /* Local properties of assignments. */
3713 static sbitmap *cprop_pavloc;
3714 static sbitmap *cprop_absaltered;
3715
3716 /* Global properties of assignments (computed from the local properties). */
3717 static sbitmap *cprop_avin;
3718 static sbitmap *cprop_avout;
3719
3720 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
3721 basic blocks. N_SETS is the number of sets. */
3722
3723 static void
3724 alloc_cprop_mem (n_blocks, n_sets)
3725 int n_blocks, n_sets;
3726 {
3727 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
3728 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
3729
3730 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
3731 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
3732 }
3733
3734 /* Free vars used by copy/const propagation. */
3735
3736 static void
3737 free_cprop_mem ()
3738 {
3739 sbitmap_vector_free (cprop_pavloc);
3740 sbitmap_vector_free (cprop_absaltered);
3741 sbitmap_vector_free (cprop_avin);
3742 sbitmap_vector_free (cprop_avout);
3743 }
3744
3745 /* For each block, compute whether X is transparent. X is either an
3746 expression or an assignment [though we don't care which, for this context
3747 an assignment is treated as an expression]. For each block where an
3748 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
3749 bit in BMAP. */
3750
3751 static void
3752 compute_transp (x, indx, bmap, set_p)
3753 rtx x;
3754 int indx;
3755 sbitmap *bmap;
3756 int set_p;
3757 {
3758 int i, j;
3759 basic_block bb;
3760 enum rtx_code code;
3761 reg_set *r;
3762 const char *fmt;
3763
3764 /* repeat is used to turn tail-recursion into iteration since GCC
3765 can't do it when there's no return value. */
3766 repeat:
3767
3768 if (x == 0)
3769 return;
3770
3771 code = GET_CODE (x);
3772 switch (code)
3773 {
3774 case REG:
3775 if (set_p)
3776 {
3777 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3778 {
3779 FOR_EACH_BB (bb)
3780 if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
3781 SET_BIT (bmap[bb->index], indx);
3782 }
3783 else
3784 {
3785 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3786 SET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3787 }
3788 }
3789 else
3790 {
3791 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3792 {
3793 FOR_EACH_BB (bb)
3794 if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
3795 RESET_BIT (bmap[bb->index], indx);
3796 }
3797 else
3798 {
3799 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3800 RESET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3801 }
3802 }
3803
3804 return;
3805
3806 case MEM:
3807 FOR_EACH_BB (bb)
3808 {
3809 rtx list_entry = canon_modify_mem_list[bb->index];
3810
3811 while (list_entry)
3812 {
3813 rtx dest, dest_addr;
3814
3815 if (GET_CODE (XEXP (list_entry, 0)) == CALL_INSN)
3816 {
3817 if (set_p)
3818 SET_BIT (bmap[bb->index], indx);
3819 else
3820 RESET_BIT (bmap[bb->index], indx);
3821 break;
3822 }
3823 /* LIST_ENTRY must be an INSN of some kind that sets memory.
3824 Examine each hunk of memory that is modified. */
3825
3826 dest = XEXP (list_entry, 0);
3827 list_entry = XEXP (list_entry, 1);
3828 dest_addr = XEXP (list_entry, 0);
3829
3830 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
3831 x, rtx_addr_varies_p))
3832 {
3833 if (set_p)
3834 SET_BIT (bmap[bb->index], indx);
3835 else
3836 RESET_BIT (bmap[bb->index], indx);
3837 break;
3838 }
3839 list_entry = XEXP (list_entry, 1);
3840 }
3841 }
3842
3843 x = XEXP (x, 0);
3844 goto repeat;
3845
3846 case PC:
3847 case CC0: /*FIXME*/
3848 case CONST:
3849 case CONST_INT:
3850 case CONST_DOUBLE:
3851 case CONST_VECTOR:
3852 case SYMBOL_REF:
3853 case LABEL_REF:
3854 case ADDR_VEC:
3855 case ADDR_DIFF_VEC:
3856 return;
3857
3858 default:
3859 break;
3860 }
3861
3862 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3863 {
3864 if (fmt[i] == 'e')
3865 {
3866 /* If we are about to do the last recursive call
3867 needed at this level, change it into iteration.
3868 This function is called enough to be worth it. */
3869 if (i == 0)
3870 {
3871 x = XEXP (x, i);
3872 goto repeat;
3873 }
3874
3875 compute_transp (XEXP (x, i), indx, bmap, set_p);
3876 }
3877 else if (fmt[i] == 'E')
3878 for (j = 0; j < XVECLEN (x, i); j++)
3879 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
3880 }
3881 }
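
/* Illustrative example (added commentary; register numbers are made
   up). For the expression

     (plus (reg 100) (mem (reg 101)))

   any block that sets reg 100, or whose recorded canonical memory
   stores may alias (mem (reg 101)), is not transparent for it, so the
   walk above sets (or resets, per SET_P) bit INDX in that block's
   BMAP entry. */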
3882
3883 /* Top level routine to do the dataflow analysis needed by copy/const
3884 propagation. */
3885
3886 static void
3887 compute_cprop_data ()
3888 {
3889 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, 1);
3890 compute_available (cprop_pavloc, cprop_absaltered,
3891 cprop_avout, cprop_avin);
3892 }
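
/* For reference (added commentary), the system solved by
   compute_available above is the standard forward available-sets
   problem:

     AVOUT(b) = PAVLOC(b) | (AVIN(b) & ~ABSALTERED(b))
     AVIN(b)  = intersection of AVOUT(p) over all predecessors p,
                with AVIN(entry) = 0

   iterated to a fixed point. */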
3893 \f
3894 /* Copy/constant propagation. */
3895
3896 /* Maximum number of register uses in an insn that we handle. */
3897 #define MAX_USES 8
3898
3899 /* Table of uses found in an insn.
3900 Allocated statically to avoid alloc/free complexity and overhead. */
3901 static struct reg_use reg_use_table[MAX_USES];
3902
3903 /* Index into `reg_use_table' while building it. */
3904 static int reg_use_count;
3905
3906 /* Set up a list of register numbers used in INSN. The found uses are stored
3907 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
3908 and contains the number of uses in the table upon exit.
3909
3910 ??? If a register appears multiple times we will record it multiple times.
3911 This doesn't hurt anything but it will slow things down. */
3912
3913 static void
3914 find_used_regs (xptr, data)
3915 rtx *xptr;
3916 void *data ATTRIBUTE_UNUSED;
3917 {
3918 int i, j;
3919 enum rtx_code code;
3920 const char *fmt;
3921 rtx x = *xptr;
3922
3923 /* repeat is used to turn tail-recursion into iteration since GCC
3924 can't do it when there's no return value. */
3925 repeat:
3926 if (x == 0)
3927 return;
3928
3929 code = GET_CODE (x);
3930 if (REG_P (x))
3931 {
3932 if (reg_use_count == MAX_USES)
3933 return;
3934
3935 reg_use_table[reg_use_count].reg_rtx = x;
3936 reg_use_count++;
3937 }
3938
3939 /* Recursively scan the operands of this expression. */
3940
3941 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3942 {
3943 if (fmt[i] == 'e')
3944 {
3945 /* If we are about to do the last recursive call
3946 needed at this level, change it into iteration.
3947 This function is called enough to be worth it. */
3948 if (i == 0)
3949 {
3950 x = XEXP (x, 0);
3951 goto repeat;
3952 }
3953
3954 find_used_regs (&XEXP (x, i), data);
3955 }
3956 else if (fmt[i] == 'E')
3957 for (j = 0; j < XVECLEN (x, i); j++)
3958 find_used_regs (&XVECEXP (x, i, j), data);
3959 }
3960 }
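
/* Illustrative example (added commentary; register numbers are made
   up). When note_uses walks

     (set (mem (reg 103)) (plus (reg 101) (reg 102)))

   the callback is applied to the SET_SRC and to the address inside
   the memory destination, so regs 101, 102 and 103 are recorded in
   reg_use_table, while a plain register destination would not be. */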
3961
3962 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
3963 Returns non-zero if successful. */
3964
3965 static int
3966 try_replace_reg (from, to, insn)
3967 rtx from, to, insn;
3968 {
3969 rtx note = find_reg_equal_equiv_note (insn);
3970 rtx src = 0;
3971 int success = 0;
3972 rtx set = single_set (insn);
3973
3974 success = validate_replace_src (from, to, insn);
3975
3976 /* If above failed and this is a single set, try to simplify the source of
3977 the set given our substitution. We could perhaps try this for multiple
3978 SETs, but it probably won't buy us anything. */
3979 if (!success && set != 0)
3980 {
3981 src = simplify_replace_rtx (SET_SRC (set), from, to);
3982
3983 if (!rtx_equal_p (src, SET_SRC (set))
3984 && validate_change (insn, &SET_SRC (set), src, 0))
3985 success = 1;
3986 }
3987
3988 /* If we've failed to do replacement, have a single SET, and don't already
3989 have a note, add a REG_EQUAL note to not lose information. */
3990 if (!success && note == 0 && set != 0)
3991 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
3992
3993 /* If there is already a NOTE, update the expression in it with our
3994 replacement. */
3995 else if (note != 0)
3996 XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), from, to);
3997
3998 /* The REG_EQUAL note may get simplified into a plain register.
3999 We don't allow that, so remove such a note. This ought not to
4000 happen, because the previous code ought to have synthesized a
4001 reg-reg move, but be on the safe side. */
4002 if (note && REG_P (XEXP (note, 0)))
4003 remove_note (insn, note);
4004
4005 return success;
4006 }
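
/* Illustrative example (added commentary; numbers are made up).
   Replacing reg 100 with (const_int 6) in

     (set (reg 101) (plus (reg 100) (const_int 4)))

   first tries the literal substitution; if the resulting insn is not
   recognized, simplify_replace_rtx folds the source to (const_int 10)
   and that is tried instead; failing both, a REG_EQUAL note recording
   the folded value is added so the information is not lost. */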
4007
4008 /* Find a set of REGNO that is available on entry to INSN's block.
4009 Returns NULL if no such set is found. */
4010
4011 static struct expr *
4012 find_avail_set (regno, insn)
4013 int regno;
4014 rtx insn;
4015 {
4016 /* SET1 contains the last set found that can be returned to the caller for
4017 use in a substitution. */
4018 struct expr *set1 = 0;
4019
4020 /* Loops are not possible here. To get a loop we would need two sets
4021 available at the start of the block containing INSN. I.e. we would
4022 need two sets like this available at the start of the block:
4023
4024 (set (reg X) (reg Y))
4025 (set (reg Y) (reg X))
4026
4027 This cannot happen since the set of (reg Y) would have killed the
4028 set of (reg X), making it unavailable at the start of this block. */
4029 while (1)
4030 {
4031 rtx src;
4032 struct expr *set = lookup_set (regno, NULL_RTX);
4033
4034 /* Find a set that is available at the start of the block
4035 which contains INSN. */
4036 while (set)
4037 {
4038 if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
4039 break;
4040 set = next_set (regno, set);
4041 }
4042
4043 /* If no available set was found we've reached the end of the
4044 (possibly empty) copy chain. */
4045 if (set == 0)
4046 break;
4047
4048 if (GET_CODE (set->expr) != SET)
4049 abort ();
4050
4051 src = SET_SRC (set->expr);
4052
4053 /* We know the set is available.
4054 Now check that SRC is ANTLOC (i.e. none of the source operands
4055 have changed since the start of the block).
4056
4057 If the source operand changed, we may still use it for the next
4058 iteration of this loop, but we may not use it for substitutions. */
4059
4060 if (CONSTANT_P (src) || oprs_not_set_p (src, insn))
4061 set1 = set;
4062
4063 /* If the source of the set is anything except a register, then
4064 we have reached the end of the copy chain. */
4065 if (GET_CODE (src) != REG)
4066 break;
4067
4068 /* Follow the copy chain, ie start another iteration of the loop
4069 and see if we have an available copy into SRC. */
4070 regno = REGNO (src);
4071 }
4072
4073 /* SET1 holds the last set that was available and anticipatable at
4074 INSN. */
4075 return set1;
4076 }
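
/* Illustrative example of the copy chain walk above (added
   commentary; numbers are made up). With both of these sets
   available at the start of INSN's block:

     (set (reg 102) (reg 101))
     (set (reg 101) (const_int 5))

   a query for reg 102 first finds the copy from reg 101, then follows
   the chain to the constant set of reg 101; SET1 ends up being the
   constant set, provided its operands are unchanged up to INSN. */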
4077
4078 /* Subroutine of cprop_insn that tries to propagate constants into
4079 JUMP_INSNS. JUMP must be a conditional jump. If SETCC is non-NULL
4080 it is the instruction that immediately precedes JUMP, and must be a
4081 single SET of a register. FROM is what we will try to replace,
4082 SRC is the constant we will try to substitute for it. Returns nonzero
4083 if a change was made. */
4084
4085 static int
4086 cprop_jump (bb, setcc, jump, from, src)
4087 basic_block bb;
4088 rtx setcc;
4089 rtx jump;
4090 rtx from;
4091 rtx src;
4092 {
4093 rtx new, new_set;
4094 rtx set = pc_set (jump);
4095
4096 /* First substitute the condition computed by SETCC into the SET_SRC
4097 of the JUMP, then substitute SRC for FROM in that expanded expression. */
4098 if (setcc != NULL)
4099 {
4100 rtx setcc_set = single_set (setcc);
4101 new_set = simplify_replace_rtx (SET_SRC (set),
4102 SET_DEST (setcc_set),
4103 SET_SRC (setcc_set));
4104 }
4105 else
4106 new_set = set;
4107
4108 new = simplify_replace_rtx (new_set, from, src);
4109
4110 /* If no simplification can be made, then try the next
4111 register. */
4112 if (rtx_equal_p (new, new_set))
4113 return 0;
4114
4115 /* If this is now a no-op delete it, otherwise this must be a valid insn. */
4116 if (new == pc_rtx)
4117 delete_insn (jump);
4118 else
4119 {
4120 if (! validate_change (jump, &SET_SRC (set), new, 0))
4121 return 0;
4122
4123 /* If this has turned into an unconditional jump,
4124 then put a barrier after it so that the unreachable
4125 code will be deleted. */
4126 if (GET_CODE (SET_SRC (set)) == LABEL_REF)
4127 emit_barrier_after (jump);
4128 }
4129
4130 #ifdef HAVE_cc0
4131 /* Delete the cc0 setter. */
4132 if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
4133 delete_insn (setcc);
4134 #endif
4135
4136 run_jump_opt_after_gcse = 1;
4137
4138 const_prop_count++;
4139 if (gcse_file != NULL)
4140 {
4141 fprintf (gcse_file,
4142 "CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
4143 REGNO (from), INSN_UID (jump));
4144 print_rtl (gcse_file, src);
4145 fprintf (gcse_file, "\n");
4146 }
4147 purge_dead_edges (bb);
4148
4149 return 1;
4150 }
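
/* Illustrative example (added commentary; numbers are made up).
   With SETCC and JUMP

     (set (reg 100) (eq (reg 90) (const_int 0)))
     (set (pc) (if_then_else (ne (reg 100) (const_int 0))
     (label_ref 23) (pc)))

   and reg 90 known to be (const_int 0), the condition folds to true
   and the jump becomes an unconditional jump to label 23; had it
   folded to (pc), the jump would simply be deleted. */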
4151
4152 /* Perform constant and copy propagation on INSN.
4153 The result is non-zero if a change was made. */
4154
4155 static int
4156 cprop_insn (bb, insn, alter_jumps)
4157 basic_block bb;
4158 rtx insn;
4159 int alter_jumps;
4160 {
4161 struct reg_use *reg_used;
4162 int changed = 0;
4163 rtx note;
4164
4165 if (!INSN_P (insn))
4166 return 0;
4167
4168 reg_use_count = 0;
4169 note_uses (&PATTERN (insn), find_used_regs, NULL);
4170
4171 note = find_reg_equal_equiv_note (insn);
4172
4173 /* We may win even when propagating constants into notes. */
4174 if (note)
4175 find_used_regs (&XEXP (note, 0), NULL);
4176
4177 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
4178 reg_used++, reg_use_count--)
4179 {
4180 unsigned int regno = REGNO (reg_used->reg_rtx);
4181 rtx pat, src;
4182 struct expr *set;
4183
4184 /* Ignore registers created by GCSE. We do this because they do
4185 not appear in the tables, which were sized by max_gcse_regno. */
4186 if (regno >= max_gcse_regno)
4187 continue;
4188
4189 /* If the register has already been set in this block, there's
4190 nothing we can do. */
4191 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
4192 continue;
4193
4194 /* Find an assignment that sets reg_used and is available
4195 at the start of the block. */
4196 set = find_avail_set (regno, insn);
4197 if (! set)
4198 continue;
4199
4200 pat = set->expr;
4201 /* ??? We might be able to handle PARALLELs. Later. */
4202 if (GET_CODE (pat) != SET)
4203 abort ();
4204
4205 src = SET_SRC (pat);
4206
4207 /* Constant propagation. */
4208 if (CONSTANT_P (src))
4209 {
4210 rtx sset;
4211
4212 /* Check for reg or cc0 setting instructions followed by
4213 conditional branch instructions first. */
4214 if (alter_jumps
4215 && (sset = single_set (insn)) != NULL
4216 && any_condjump_p (NEXT_INSN (insn))
4217 && onlyjump_p (NEXT_INSN (insn)))
4218 {
4219 rtx dest = SET_DEST (sset);
4220 if ((REG_P (dest) || CC0_P (dest))
4221 && cprop_jump (bb, insn, NEXT_INSN (insn),
4222 reg_used->reg_rtx, src))
4223 {
4224 changed = 1;
4225 break;
4226 }
4227 }
4228
4229 /* Handle normal insns next. */
4230 if (GET_CODE (insn) == INSN
4231 && try_replace_reg (reg_used->reg_rtx, src, insn))
4232 {
4233 changed = 1;
4234 const_prop_count++;
4235 if (gcse_file != NULL)
4236 {
4237 fprintf (gcse_file, "CONST-PROP: Replacing reg %d in ",
4238 regno);
4239 fprintf (gcse_file, "insn %d with constant ",
4240 INSN_UID (insn));
4241 print_rtl (gcse_file, src);
4242 fprintf (gcse_file, "\n");
4243 }
4244
4245 /* The original insn setting reg_used may or may not now be
4246 deletable. We leave the deletion to flow. */
4247 }
4248
4249 /* Try to propagate a CONST_INT into a conditional jump.
4250 We're pretty specific about what we will handle in this
4251 code, we can extend this as necessary over time.
4252
4253 Right now the insn in question must look like
4254 (set (pc) (if_then_else ...)) */
4255 else if (alter_jumps
4256 && any_condjump_p (insn)
4257 && onlyjump_p (insn))
4258 changed |= cprop_jump (bb, NULL, insn, reg_used->reg_rtx, src);
4259
4260 }
4261 else if (GET_CODE (src) == REG
4262 && REGNO (src) >= FIRST_PSEUDO_REGISTER
4263 && REGNO (src) != regno)
4264 {
4265 if (try_replace_reg (reg_used->reg_rtx, src, insn))
4266 {
4267 changed = 1;
4268 copy_prop_count++;
4269 if (gcse_file != NULL)
4270 {
4271 fprintf (gcse_file, "COPY-PROP: Replacing reg %d in insn %d",
4272 regno, INSN_UID (insn));
4273 fprintf (gcse_file, " with reg %d\n", REGNO (src));
4274 }
4275
4276 /* The original insn setting reg_used may or may not now be
4277 deletable. We leave the deletion to flow. */
4278 /* FIXME: If it turns out that the insn isn't deletable,
4279 then we may have unnecessarily extended register lifetimes
4280 and made things worse. */
4281 }
4282 }
4283 }
4284
4285 return changed;
4286 }
4287
4288 /* Forward propagate copies. This includes copies and constants. Return
4289 non-zero if a change was made. */
4290
4291 static int
4292 cprop (alter_jumps)
4293 int alter_jumps;
4294 {
4295 int changed;
4296 basic_block bb;
4297 rtx insn;
4298
4299 /* Note we start at block 1. */
4300 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
4301 {
4302 if (gcse_file != NULL)
4303 fprintf (gcse_file, "\n");
4304 return 0;
4305 }
4306
4307 changed = 0;
4308 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
4309 {
4310 /* Reset tables used to keep track of what's still valid [since the
4311 start of the block]. */
4312 reset_opr_set_tables ();
4313
4314 for (insn = bb->head;
4315 insn != NULL && insn != NEXT_INSN (bb->end);
4316 insn = NEXT_INSN (insn))
4317 if (INSN_P (insn))
4318 {
4319 changed |= cprop_insn (bb, insn, alter_jumps);
4320
4321 /* Keep track of everything modified by this insn. */
4322 /* ??? Need to be careful w.r.t. mods done to INSN. Don't
4323 call mark_oprs_set if we turned the insn into a NOTE. */
4324 if (GET_CODE (insn) != NOTE)
4325 mark_oprs_set (insn);
4326 }
4327 }
4328
4329 if (gcse_file != NULL)
4330 fprintf (gcse_file, "\n");
4331
4332 return changed;
4333 }
4334
4335 /* Perform one copy/constant propagation pass.
4336 PASS is the pass count. ALTER_JUMPS is nonzero if constants
4337 may also be propagated into conditional jumps. */
4338
4339 static int
4340 one_cprop_pass (pass, alter_jumps)
4341 int pass;
4342 int alter_jumps;
4343 {
4344 int changed = 0;
4345
4346 const_prop_count = 0;
4347 copy_prop_count = 0;
4348
4349 alloc_set_hash_table (max_cuid);
4350 compute_set_hash_table ();
4351 if (gcse_file)
4352 dump_hash_table (gcse_file, "SET", set_hash_table, set_hash_table_size,
4353 n_sets);
4354 if (n_sets > 0)
4355 {
4356 alloc_cprop_mem (last_basic_block, n_sets);
4357 compute_cprop_data ();
4358 changed = cprop (alter_jumps);
4359 if (alter_jumps)
4360 changed |= bypass_conditional_jumps ();
4361 free_cprop_mem ();
4362 }
4363
4364 free_set_hash_table ();
4365
4366 if (gcse_file)
4367 {
4368 fprintf (gcse_file, "CPROP of %s, pass %d: %d bytes needed, ",
4369 current_function_name, pass, bytes_used);
4370 fprintf (gcse_file, "%d const props, %d copy props\n\n",
4371 const_prop_count, copy_prop_count);
4372 }
4373
4374 return changed;
4375 }
4376 \f
4377 /* Bypass conditional jumps. */
4378
4379 /* Find a set of REGNO to a constant that is available at the end of basic
4380 block BB. Returns NULL if no such set is found. Based heavily upon
4381 find_avail_set. */
4382
4383 static struct expr *
4384 find_bypass_set (regno, bb)
4385 int regno;
4386 int bb;
4387 {
4388 struct expr *result = 0;
4389
4390 for (;;)
4391 {
4392 rtx src;
4393 struct expr *set = lookup_set (regno, NULL_RTX);
4394
4395 while (set)
4396 {
4397 if (TEST_BIT (cprop_avout[bb], set->bitmap_index))
4398 break;
4399 set = next_set (regno, set);
4400 }
4401
4402 if (set == 0)
4403 break;
4404
4405 if (GET_CODE (set->expr) != SET)
4406 abort ();
4407
4408 src = SET_SRC (set->expr);
4409 if (CONSTANT_P (src))
4410 result = set;
4411
4412 if (GET_CODE (src) != REG)
4413 break;
4414
4415 regno = REGNO (src);
4416 }
4417 return result;
4418 }
4419
4420
4421 /* Subroutine of bypass_conditional_jumps that attempts to bypass the given
4422 basic block BB which has more than one predecessor. If not NULL, SETCC
4423 is the first instruction of BB, which is immediately followed by JUMP_INSN
4424 JUMP. Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
4425 Returns nonzero if a change was made. */
4426
4427 static int
4428 bypass_block (bb, setcc, jump)
4429 basic_block bb;
4430 rtx setcc, jump;
4431 {
4432 rtx insn, note;
4433 edge e, enext;
4434 int i, change;
4435
4436 insn = (setcc != NULL) ? setcc : jump;
4437
4438 /* Determine set of register uses in INSN. */
4439 reg_use_count = 0;
4440 note_uses (&PATTERN (insn), find_used_regs, NULL);
4441 note = find_reg_equal_equiv_note (insn);
4442 if (note)
4443 find_used_regs (&XEXP (note, 0), NULL);
4444
4445 change = 0;
4446 for (e = bb->pred; e; e = enext)
4447 {
4448 enext = e->pred_next;
4449 for (i = 0; i < reg_use_count; i++)
4450 {
4451 struct reg_use *reg_used = &reg_use_table[i];
4452 unsigned int regno = REGNO (reg_used->reg_rtx);
4453 basic_block dest, old_dest;
4454 struct expr *set;
4455 rtx src, new;
4456
4457 if (regno >= max_gcse_regno)
4458 continue;
4459
4460 set = find_bypass_set (regno, e->src->index);
4461
4462 if (! set)
4463 continue;
4464
4465 src = SET_SRC (pc_set (jump));
4466
4467 if (setcc != NULL)
4468 src = simplify_replace_rtx (src,
4469 SET_DEST (PATTERN (setcc)),
4470 SET_SRC (PATTERN (setcc)));
4471
4472 new = simplify_replace_rtx (src, reg_used->reg_rtx,
4473 SET_SRC (set->expr));
4474
4475 if (new == pc_rtx)
4476 dest = FALLTHRU_EDGE (bb)->dest;
4477 else if (GET_CODE (new) == LABEL_REF)
4478 dest = BRANCH_EDGE (bb)->dest;
4479 else
4480 dest = NULL;
4481
4482 /* Once basic block indices are stable, we should be able
4483 to use redirect_edge_and_branch_force instead. */
4484 old_dest = e->dest;
4485 if (dest != NULL && dest != old_dest
4486 && redirect_edge_and_branch (e, dest))
4487 {
4488 /* Copy the register setter to the redirected edge.
4489 Don't copy CC0 setters, as CC0 is dead after jump. */
4490 if (setcc)
4491 {
4492 rtx pat = PATTERN (setcc);
4493 if (!CC0_P (SET_DEST (pat)))
4494 insert_insn_on_edge (copy_insn (pat), e);
4495 }
4496
4497 if (gcse_file != NULL)
4498 {
4499 fprintf (gcse_file, "JUMP-BYPASS: Proved reg %d in jump_insn %d equals constant ",
4500 regno, INSN_UID (jump));
4501 print_rtl (gcse_file, SET_SRC (set->expr));
4502 fprintf (gcse_file, "\nBypass edge from %d->%d to %d\n",
4503 e->src->index, old_dest->index, dest->index);
4504 }
4505 change = 1;
4506 break;
4507 }
4508 }
4509 }
4510 return change;
4511 }
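
/* Illustrative example (added commentary; block and register numbers
   are made up). If a block reached from several predecessors
   contains only

     (set (pc) (if_then_else (eq (reg 100) (const_int 0))
     (label_ref 42) (pc)))

   and on one incoming edge reg 100 is known to be (const_int 0), the
   comparison is decided on that edge, which can be redirected
   straight to label 42, bypassing the conditional jump for that
   predecessor only. */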
4512
4513 /* Find basic blocks with more than one predecessor that only contain a
4514 single conditional jump. If the result of the comparison is known at
4515 compile-time from any incoming edge, redirect that edge to the
4516 appropriate target. Returns nonzero if a change was made. */
4517
4518 static int
4519 bypass_conditional_jumps ()
4520 {
4521 basic_block bb;
4522 int changed;
4523 rtx setcc;
4524 rtx insn;
4525 rtx dest;
4526
4527 /* Note we start at block 1. */
4528 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
4529 return 0;
4530
4531 changed = 0;
4532 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
4533 EXIT_BLOCK_PTR, next_bb)
4534 {
4535 /* Check for more than one predecessor. */
4536 if (bb->pred && bb->pred->pred_next)
4537 {
4538 setcc = NULL_RTX;
4539 for (insn = bb->head;
4540 insn != NULL && insn != NEXT_INSN (bb->end);
4541 insn = NEXT_INSN (insn))
4542 if (GET_CODE (insn) == INSN)
4543 {
4544 if (setcc)
4545 break;
4546 if (GET_CODE (PATTERN (insn)) != SET)
4547 break;
4548
4549 dest = SET_DEST (PATTERN (insn));
4550 if (REG_P (dest) || CC0_P (dest))
4551 setcc = insn;
4552 else
4553 break;
4554 }
4555 else if (GET_CODE (insn) == JUMP_INSN)
4556 {
4557 if (any_condjump_p (insn) && onlyjump_p (insn))
4558 changed |= bypass_block (bb, setcc, insn);
4559 break;
4560 }
4561 else if (INSN_P (insn))
4562 break;
4563 }
4564 }
4565
4566 /* If we bypassed any register setting insns, we inserted a
4567 copy on the redirected edge. These need to be committed. */
4568 if (changed)
4569 commit_edge_insertions();
4570
4571 return changed;
4572 }
4573 \f
4574 /* Compute PRE+LCM working variables. */
4575
4576 /* Local properties of expressions. */
4577 /* Nonzero for expressions that are transparent in the block. */
4578 static sbitmap *transp;
4579
4580 /* Nonzero for expressions that are transparent at the end of the block.
4581 This is only zero for expressions killed by an abnormal critical edge
4582 created by a call. */
4583 static sbitmap *transpout;
4584
4585 /* Nonzero for expressions that are computed (available) in the block. */
4586 static sbitmap *comp;
4587
4588 /* Nonzero for expressions that are locally anticipatable in the block. */
4589 static sbitmap *antloc;
4590
4591 /* Nonzero for expressions where this block is an optimal computation
4592 point. */
4593 static sbitmap *pre_optimal;
4594
4595 /* Nonzero for expressions which are redundant in a particular block. */
4596 static sbitmap *pre_redundant;
4597
4598 /* Nonzero for expressions which should be inserted on a specific edge. */
4599 static sbitmap *pre_insert_map;
4600
4601 /* Nonzero for expressions which should be deleted in a specific block. */
4602 static sbitmap *pre_delete_map;
4603
4604 /* Contains the edge_list returned by pre_edge_lcm. */
4605 static struct edge_list *edge_list;
4606
4607 /* Redundant insns. */
4608 static sbitmap pre_redundant_insns;
4609
4610 /* Allocate vars used for PRE analysis. */
4611
4612 static void
4613 alloc_pre_mem (n_blocks, n_exprs)
4614 int n_blocks, n_exprs;
4615 {
4616 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4617 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4618 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4619
4620 pre_optimal = NULL;
4621 pre_redundant = NULL;
4622 pre_insert_map = NULL;
4623 pre_delete_map = NULL;
4624 ae_in = NULL;
4625 ae_out = NULL;
4626 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
4627
4628 /* pre_insert and pre_delete are allocated later. */
4629 }
4630
4631 /* Free vars used for PRE analysis. */
4632
4633 static void
4634 free_pre_mem ()
4635 {
4636 sbitmap_vector_free (transp);
4637 sbitmap_vector_free (comp);
4638
4639 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
4640
4641 if (pre_optimal)
4642 sbitmap_vector_free (pre_optimal);
4643 if (pre_redundant)
4644 sbitmap_vector_free (pre_redundant);
4645 if (pre_insert_map)
4646 sbitmap_vector_free (pre_insert_map);
4647 if (pre_delete_map)
4648 sbitmap_vector_free (pre_delete_map);
4649 if (ae_in)
4650 sbitmap_vector_free (ae_in);
4651 if (ae_out)
4652 sbitmap_vector_free (ae_out);
4653
4654 transp = comp = NULL;
4655 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
4656 ae_in = ae_out = NULL;
4657 }
4658
4659 /* Top level routine to do the dataflow analysis needed by PRE. */
4660
4661 static void
4662 compute_pre_data ()
4663 {
4664 sbitmap trapping_expr;
4665 basic_block bb;
4666 unsigned int ui;
4667
4668 compute_local_properties (transp, comp, antloc, 0);
4669 sbitmap_vector_zero (ae_kill, last_basic_block);
4670
4671 /* Collect expressions which might trap. */
4672 trapping_expr = sbitmap_alloc (n_exprs);
4673 sbitmap_zero (trapping_expr);
4674 for (ui = 0; ui < expr_hash_table_size; ui++)
4675 {
4676 struct expr *e;
4677 for (e = expr_hash_table[ui]; e != NULL; e = e->next_same_hash)
4678 if (may_trap_p (e->expr))
4679 SET_BIT (trapping_expr, e->bitmap_index);
4680 }
4681
4682 /* Compute ae_kill for each basic block using:
4683
4684 ~(TRANSP | COMP)
4685
4686 This is significantly faster than compute_ae_kill. */
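/* (Added commentary.) This is safe because COMP overrides KILL in
   the availability equation: an expression recomputed late enough in
   the block (COMP) is available at its end regardless of KILL, and
   one whose operands are never modified (TRANSP) must not be killed;
   marking every remaining case killed therefore loses nothing. */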
4687
4688 FOR_EACH_BB (bb)
4689 {
4690 edge e;
4691
4692 /* If the current block is the destination of an abnormal edge, we
4693 kill all trapping expressions because we won't be able to properly
4694 place the instruction on the edge. So make them neither
4695 anticipatable nor transparent. This is fairly conservative. */
4696 for (e = bb->pred; e ; e = e->pred_next)
4697 if (e->flags & EDGE_ABNORMAL)
4698 {
4699 sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr);
4700 sbitmap_difference (transp[bb->index], transp[bb->index], trapping_expr);
4701 break;
4702 }
4703
4704 sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
4705 sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
4706 }
4707
4708 edge_list = pre_edge_lcm (gcse_file, n_exprs, transp, comp, antloc,
4709 ae_kill, &pre_insert_map, &pre_delete_map);
4710 sbitmap_vector_free (antloc);
4711 antloc = NULL;
4712 sbitmap_vector_free (ae_kill);
4713 ae_kill = NULL;
4714 sbitmap_free (trapping_expr);
4715 }
4716 \f
4717 /* PRE utilities */
4718
4719 /* Return non-zero if an occurrence of expression EXPR in OCCR_BB would reach
4720 block BB.
4721
4722 VISITED is a pointer to a working buffer for tracking which BB's have
4723 been visited. It is NULL for the top-level call.
4724
4725 We treat reaching expressions that go through blocks containing the same
4726 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
4727 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
4728 2 as not reaching. The intent is to improve the probability of finding
4729 only one reaching expression and to reduce register lifetimes by picking
4730 the closest such expression. */
4731
4732 static int
4733 pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited)
4734 basic_block occr_bb;
4735 struct expr *expr;
4736 basic_block bb;
4737 char *visited;
4738 {
4739 edge pred;
4740
4741 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
4742 {
4743 basic_block pred_bb = pred->src;
4744
4745 if (pred->src == ENTRY_BLOCK_PTR
4746 /* Has this predecessor already been visited? */
4747 || visited[pred_bb->index])
4748 ;/* Nothing to do. */
4749
4750 /* Does this predecessor generate this expression? */
4751 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
4752 {
4753 /* Is this the occurrence we're looking for?
4754 Note that there's only one generating occurrence per block
4755 so we just need to check the block number. */
4756 if (occr_bb == pred_bb)
4757 return 1;
4758
4759 visited[pred_bb->index] = 1;
4760 }
4761 /* Ignore this predecessor if it kills the expression. */
4762 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
4763 visited[pred_bb->index] = 1;
4764
4765 /* Neither gen nor kill. */
4766 else
4767 {
4768 visited[pred_bb->index] = 1;
4769 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
4770 return 1;
4771 }
4772 }
4773
4774 /* All paths have been checked. */
4775 return 0;
4776 }
4777
4778 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
4779 memory allocated for that function is returned. */
4780
4781 static int
4782 pre_expr_reaches_here_p (occr_bb, expr, bb)
4783 basic_block occr_bb;
4784 struct expr *expr;
4785 basic_block bb;
4786 {
4787 int rval;
4788 char *visited = (char *) xcalloc (last_basic_block, 1);
4789
4790 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
4791
4792 free (visited);
4793 return rval;
4794 }
4795 \f
4796
4797 /* Given an expr, generate RTL which we can insert at the end of a BB,
4798 or on an edge, to compute the expression into its reaching
4799 register. */
4800
4801 static rtx
4802 process_insert_insn (expr)
4803 struct expr *expr;
4804 {
4805 rtx reg = expr->reaching_reg;
4806 rtx exp = copy_rtx (expr->expr);
4807 rtx pat;
4808
4809 start_sequence ();
4810
4811 /* If the expression is something that's an operand, like a constant,
4812 just copy it to a register. */
4813 if (general_operand (exp, GET_MODE (reg)))
4814 emit_move_insn (reg, exp);
4815
4816 /* Otherwise, make a new insn to compute this expression and make sure the
4817 insn will be recognized (this also adds any needed CLOBBERs). Copy the
4818 expression to make sure we don't have any sharing issues. */
4819 else if (insn_invalid_p (emit_insn (gen_rtx_SET (VOIDmode, reg, exp))))
4820 abort ();
4821
4822 pat = gen_sequence ();
4823 end_sequence ();
4824
4825 return pat;
4826 }
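
/* Illustrative example (added commentary; numbers are made up). For
   EXPR (plus (reg 100) (const_int 4)) with reaching register
   (reg 200), the generated sequence is essentially

     (set (reg 200) (plus (reg 100) (const_int 4)))

   plus whatever CLOBBERs recognition adds; for an expression that is
   already a general operand, a plain move is emitted instead. */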
4827
4828 /* Add EXPR to the end of basic block BB.
4829
4830 This is used by both the PRE and code hoisting.
4831
4832 For PRE, we want to verify that the expr is either transparent
4833 or locally anticipatable in the target block. This check makes
4834 no sense for code hoisting. */
4835
4836 static void
4837 insert_insn_end_bb (expr, bb, pre)
4838 struct expr *expr;
4839 basic_block bb;
4840 int pre;
4841 {
4842 rtx insn = bb->end;
4843 rtx new_insn;
4844 rtx reg = expr->reaching_reg;
4845 int regno = REGNO (reg);
4846 rtx pat;
4847 int i;
4848
4849 pat = process_insert_insn (expr);
4850
4851 /* If the last insn is a jump, insert EXPR in front [taking care to
4852 handle cc0, etc. properly]. Similarly, we need to take care with
4853 trapping instructions in the presence of non-call exceptions. */
4854
4855 if (GET_CODE (insn) == JUMP_INSN
4856 || (GET_CODE (insn) == INSN
4857 && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL))))
4858 {
4859 #ifdef HAVE_cc0
4860 rtx note;
4861 #endif
4862 /* It should always be the case that we can put these instructions
4863 anywhere in the basic block when performing PRE optimizations.
4864 Check this. */
4865 if (GET_CODE (insn) == INSN && pre
4866 && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
4867 && !TEST_BIT (transp[bb->index], expr->bitmap_index))
4868 abort ();
4869
4870 /* If this is a jump table, then we can't insert stuff here. Since
4871 we know the previous real insn must be the tablejump, we insert
4872 the new instruction just before the tablejump. */
4873 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
4874 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
4875 insn = prev_real_insn (insn);
4876
4877 #ifdef HAVE_cc0
4878 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
4879 if cc0 isn't set. */
4880 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
4881 if (note)
4882 insn = XEXP (note, 0);
4883 else
4884 {
4885 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
4886 if (maybe_cc0_setter
4887 && INSN_P (maybe_cc0_setter)
4888 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
4889 insn = maybe_cc0_setter;
4890 }
4891 #endif
4892 /* FIXME: What if something in cc0/jump uses value set in new insn? */
4893 new_insn = emit_insn_before (pat, insn);
4894 }
4895
4896 /* Likewise if the last insn is a call, as will happen in the presence
4897 of exception handling. */
4898 else if (GET_CODE (insn) == CALL_INSN
4899 && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL)))
4900 {
4901 /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
4902 we search backward and place the instructions before the first
4903 parameter is loaded. Do this for everyone for consistency and a
4904 presumption that we'll get better code elsewhere as well.
4905
4906 It should always be the case that we can put these instructions
4907 anywhere in the basic block when performing PRE optimizations.
4908 Check this. */
4909
4910 if (pre
4911 && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
4912 && !TEST_BIT (transp[bb->index], expr->bitmap_index))
4913 abort ();
4914
4915 /* Since different machines initialize their parameter registers
4916 in different orders, assume nothing. Collect the set of all
4917 parameter registers. */
4918 insn = find_first_parameter_load (insn, bb->head);
4919
4920 /* If we found all the parameter loads, then we want to insert
4921 before the first parameter load.
4922
4923 If we did not find all the parameter loads, then we might have
4924 stopped on the head of the block, which could be a CODE_LABEL.
4925 If we inserted before the CODE_LABEL, then we would be putting
4926 the insn in the wrong basic block. In that case, put the insn
4927 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
4928 while (GET_CODE (insn) == CODE_LABEL
4929 || NOTE_INSN_BASIC_BLOCK_P (insn))
4930 insn = NEXT_INSN (insn);
4931
4932 new_insn = emit_insn_before (pat, insn);
4933 }
4934 else
4935 new_insn = emit_insn_after (pat, insn);
4936
4937 /* Keep block number table up to date.
4938 Note, PAT could be a multiple insn sequence, we have to make
4939 sure that each insn in the sequence is handled. */
4940 if (GET_CODE (pat) == SEQUENCE)
4941 {
4942 for (i = 0; i < XVECLEN (pat, 0); i++)
4943 {
4944 rtx insn = XVECEXP (pat, 0, i);
4945 if (INSN_P (insn))
4946 add_label_notes (PATTERN (insn), new_insn);
4947
4948 note_stores (PATTERN (insn), record_set_info, insn);
4949 }
4950 }
4951 else
4952 {
4953 add_label_notes (pat, new_insn);
4954
4955 /* Keep register set table up to date. */
4956 record_one_set (regno, new_insn);
4957 }
4958
4959 gcse_create_count++;
4960
4961 if (gcse_file)
4962 {
4963 fprintf (gcse_file, "PRE/HOIST: end of bb %d, insn %d, ",
4964 bb->index, INSN_UID (new_insn));
4965 fprintf (gcse_file, "copying expression %d to reg %d\n",
4966 expr->bitmap_index, regno);
4967 }
4968 }
4969
4970 /* Insert partially redundant expressions on edges in the CFG to make
4971 the expressions fully redundant. */
4972
4973 static int
4974 pre_edge_insert (edge_list, index_map)
4975 struct edge_list *edge_list;
4976 struct expr **index_map;
4977 {
4978 int e, i, j, num_edges, set_size, did_insert = 0;
4979 sbitmap *inserted;
4980
4981 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
4982 if it reaches any of the deleted expressions. */
4983
4984 set_size = pre_insert_map[0]->size;
4985 num_edges = NUM_EDGES (edge_list);
4986 inserted = sbitmap_vector_alloc (num_edges, n_exprs);
4987 sbitmap_vector_zero (inserted, num_edges);
4988
4989 for (e = 0; e < num_edges; e++)
4990 {
4991 int indx;
4992 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
4993
4994 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
4995 {
4996 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
4997
4998 for (j = indx; insert && j < n_exprs; j++, insert >>= 1)
4999 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
5000 {
5001 struct expr *expr = index_map[j];
5002 struct occr *occr;
5003
5004 /* Now look at each deleted occurrence of this expression. */
5005 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
5006 {
5007 if (! occr->deleted_p)
5008 continue;
5009
5010 /* Insert this expression on this edge if it would
5011 reach the deleted occurrence in BB. */
5012 if (!TEST_BIT (inserted[e], j))
5013 {
5014 rtx insn;
5015 edge eg = INDEX_EDGE (edge_list, e);
5016
5017 /* We can't insert anything on an abnormal and
5018 critical edge, so we insert the insn at the end of
5019 the previous block. There are several alternatives
5020 detailed in Morgan's book, p. 277 (sec. 10.5), for
5021 handling this situation. This one is easiest for
5022 now. */
5023
5024 if ((eg->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
5025 insert_insn_end_bb (index_map[j], bb, 0);
5026 else
5027 {
5028 insn = process_insert_insn (index_map[j]);
5029 insert_insn_on_edge (insn, eg);
5030 }
5031
5032 if (gcse_file)
5033 {
5034 fprintf (gcse_file, "PRE/HOIST: edge (%d,%d), ",
5035 bb->index,
5036 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
5037 fprintf (gcse_file, "copy expression %d\n",
5038 expr->bitmap_index);
5039 }
5040
5041 update_ld_motion_stores (expr);
5042 SET_BIT (inserted[e], j);
5043 did_insert = 1;
5044 gcse_create_count++;
5045 }
5046 }
5047 }
5048 }
5049 }
5050
5051 sbitmap_vector_free (inserted);
5052 return did_insert;
5053 }
5054
5055 /* Copy the result of INSN into EXPR's reaching register. */
5056
5057 static void
5058 pre_insert_copy_insn (expr, insn)
5059 struct expr *expr;
5060 rtx insn;
5061 {
5062 rtx reg = expr->reaching_reg;
5063 int regno = REGNO (reg);
5064 int indx = expr->bitmap_index;
5065 rtx set = single_set (insn);
5066 rtx new_insn;
5067
5068 if (!set)
5069 abort ();
5070
5071 new_insn = emit_insn_after (gen_move_insn (reg, SET_DEST (set)), insn);
5072
5073 /* Keep register set table up to date. */
5074 record_one_set (regno, new_insn);
5075
5076 gcse_create_count++;
5077
5078 if (gcse_file)
5079 fprintf (gcse_file,
5080 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
5081 BLOCK_NUM (insn), INSN_UID (new_insn), indx,
5082 INSN_UID (insn), regno);
5083 update_ld_motion_stores (expr);
5084 }
5085
5086 /* Copy available expressions that reach the redundant expression
5087 to `reaching_reg'. */
5088
5089 static void
5090 pre_insert_copies ()
5091 {
5092 unsigned int i;
5093 struct expr *expr;
5094 struct occr *occr;
5095 struct occr *avail;
5096
5097 /* For each available expression in the table, copy the result to
5098 `reaching_reg' if the expression reaches a deleted one.
5099
5100 ??? The current algorithm is rather brute force.
5101 Need to do some profiling. */
5102
5103 for (i = 0; i < expr_hash_table_size; i++)
5104 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
5105 {
5106 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
5107 we don't want to insert a copy here because the expression may not
5108 really be redundant. So only insert an insn if the expression was
5109 deleted. This test also avoids further processing if the
5110 expression wasn't deleted anywhere. */
5111 if (expr->reaching_reg == NULL)
5112 continue;
5113
5114 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
5115 {
5116 if (! occr->deleted_p)
5117 continue;
5118
5119 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
5120 {
5121 rtx insn = avail->insn;
5122
5123 /* No need to handle this one if handled already. */
5124 if (avail->copied_p)
5125 continue;
5126
5127 /* Don't handle this one if it's a redundant one. */
5128 if (TEST_BIT (pre_redundant_insns, INSN_CUID (insn)))
5129 continue;
5130
5131 /* Or if the expression doesn't reach the deleted one. */
5132 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
5133 expr,
5134 BLOCK_FOR_INSN (occr->insn)))
5135 continue;
5136
5137 /* Copy the result of avail to reaching_reg. */
5138 pre_insert_copy_insn (expr, insn);
5139 avail->copied_p = 1;
5140 }
5141 }
5142 }
5143 }
5144
5145 /* Emit move from SRC to DEST noting the equivalence with expression computed
5146 in INSN. */
5147 static rtx
5148 gcse_emit_move_after (src, dest, insn)
5149 rtx src, dest, insn;
5150 {
5151 rtx new;
5152 rtx set = single_set (insn);
5153 rtx note;
5154 rtx eqv;
5155
5156 /* This should never fail since we're creating a reg->reg copy
5157 we've verified to be valid. */
5158
5159 new = emit_insn_after (gen_rtx_SET (VOIDmode, dest, src), insn);
5160
5161 /* Note the equivalence for local CSE pass. */
5162 if ((note = find_reg_equal_equiv_note (insn)))
5163 eqv = XEXP (note, 0);
5164 else
5165 eqv = SET_SRC (set);
5166
5167 set_unique_reg_note (new, REG_EQUAL, copy_insn_1 (eqv));
5168
5169 return new;
5170 }
5171
5172 /* Delete redundant computations.
5173 Deletion is done by changing the insn to copy the `reaching_reg' of
5174 the expression into the result of the SET. It is left to later passes
5175 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
5176
5177 Returns non-zero if a change is made. */
5178
5179 static int
5180 pre_delete ()
5181 {
5182 unsigned int i;
5183 int changed;
5184 struct expr *expr;
5185 struct occr *occr;
5186
5187 changed = 0;
5188 for (i = 0; i < expr_hash_table_size; i++)
5189 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
5190 {
5191 int indx = expr->bitmap_index;
5192
5193 /* We only need to search antic_occr since we require
5194 ANTLOC != 0. */
5195
5196 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
5197 {
5198 rtx insn = occr->insn;
5199 rtx set;
5200 basic_block bb = BLOCK_FOR_INSN (insn);
5201
5202 if (TEST_BIT (pre_delete_map[bb->index], indx))
5203 {
5204 set = single_set (insn);
5205 if (! set)
5206 abort ();
5207
5208 /* Create a pseudo-reg to store the result of reaching
5209 expressions into. Get the mode for the new pseudo from
5210 the mode of the original destination pseudo. */
5211 if (expr->reaching_reg == NULL)
5212 expr->reaching_reg
5213 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
5214
5215 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
5216 delete_insn (insn);
5217 occr->deleted_p = 1;
5218 SET_BIT (pre_redundant_insns, INSN_CUID (insn));
5219 changed = 1;
5220 gcse_subst_count++;
5221
5222 if (gcse_file)
5223 {
5224 fprintf (gcse_file,
5225 "PRE: redundant insn %d (expression %d) in ",
5226 INSN_UID (insn), indx);
5227 fprintf (gcse_file, "bb %d, reaching reg is %d\n",
5228 bb->index, REGNO (expr->reaching_reg));
5229 }
5230 }
5231 }
5232 }
5233
5234 return changed;
5235 }
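
/* Illustrative example (added commentary; numbers are made up). A
   partially redundant occurrence

     insn 30: (set (reg 105) (plus (reg 90) (reg 91)))

   whose block has its PRE_DELETE_MAP bit set is replaced by

     insn 31: (set (reg 105) (reg 200))

   where reg 200 is the expression's reaching register, which the
   insertions made by pre_edge_insert and the copies added by
   pre_insert_copies keep up to date on every path. */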
5236
5237 /* Perform GCSE optimizations using PRE.
5238 This is called by one_pre_gcse_pass after all the dataflow analysis
5239 has been done.
5240
5241 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
5242 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
5243 Compiler Design and Implementation.
5244
5245 ??? A new pseudo reg is created to hold the reaching expression. The nice
5246 thing about the classical approach is that it would try to use an existing
5247 reg. If the register can't be adequately optimized [i.e. we introduce
5248 reload problems], one could add a pass here to propagate the new register
5249 through the block.
5250
5251 ??? We don't handle single sets in PARALLELs because we're [currently] not
5252 able to copy the rest of the parallel when we insert copies to create full
5253 redundancies from partial redundancies. However, there's no reason why we
5254 can't handle PARALLELs in the cases where there are no partial
5255 redundancies. */
5256
5257 static int
5258 pre_gcse ()
5259 {
5260 unsigned int i;
5261 int did_insert, changed;
5262 struct expr **index_map;
5263 struct expr *expr;
5264
5265 /* Compute a mapping from expression number (`bitmap_index') to
5266 hash table entry. */
5267
5268 index_map = (struct expr **) xcalloc (n_exprs, sizeof (struct expr *));
5269 for (i = 0; i < expr_hash_table_size; i++)
5270 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
5271 index_map[expr->bitmap_index] = expr;
5272
5273 /* Reset bitmap used to track which insns are redundant. */
5274 pre_redundant_insns = sbitmap_alloc (max_cuid);
5275 sbitmap_zero (pre_redundant_insns);
5276
5277 /* Delete the redundant insns first so that
5278 - we know what register to use for the new insns and for the other
5279 ones with reaching expressions
5280 - we know which insns are redundant when we go to create copies */
5281
5282 changed = pre_delete ();
5283
5284 did_insert = pre_edge_insert (edge_list, index_map);
5285
5286 /* In other places with reaching expressions, copy the expression to the
5287 specially allocated pseudo-reg that reaches the redundant expr. */
5288 pre_insert_copies ();
5289 if (did_insert)
5290 {
5291 commit_edge_insertions ();
5292 changed = 1;
5293 }
5294
5295 free (index_map);
5296 sbitmap_free (pre_redundant_insns);
5297 return changed;
5298 }
5299
5300 /* Top level routine to perform one PRE GCSE pass.
5301
5302 Return non-zero if a change was made. */
5303
5304 static int
5305 one_pre_gcse_pass (pass)
5306 int pass;
5307 {
5308 int changed = 0;
5309
5310 gcse_subst_count = 0;
5311 gcse_create_count = 0;
5312
5313 alloc_expr_hash_table (max_cuid);
5314 add_noreturn_fake_exit_edges ();
5315 if (flag_gcse_lm)
5316 compute_ld_motion_mems ();
5317
5318 compute_expr_hash_table ();
5319 trim_ld_motion_mems ();
5320 if (gcse_file)
5321 dump_hash_table (gcse_file, "Expression", expr_hash_table,
5322 expr_hash_table_size, n_exprs);
5323
5324 if (n_exprs > 0)
5325 {
5326 alloc_pre_mem (last_basic_block, n_exprs);
5327 compute_pre_data ();
5328 changed |= pre_gcse ();
5329 free_edge_list (edge_list);
5330 free_pre_mem ();
5331 }
5332
5333 free_ldst_mems ();
5334 remove_fake_edges ();
5335 free_expr_hash_table ();
5336
5337 if (gcse_file)
5338 {
5339 fprintf (gcse_file, "\nPRE GCSE of %s, pass %d: %d bytes needed, ",
5340 current_function_name, pass, bytes_used);
5341 fprintf (gcse_file, "%d substs, %d insns created\n",
5342 gcse_subst_count, gcse_create_count);
5343 }
5344
5345 return changed;
5346 }
5347 \f
5348 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to INSN.
5349 If notes are added to an insn which references a CODE_LABEL, the
5350 LABEL_NUSES count is incremented. We have to add REG_LABEL notes,
5351 because the following loop optimization pass requires them. */
5352
5353 /* ??? This is very similar to the loop.c add_label_notes function. We
5354 could probably share code here. */
5355
5356 /* ??? If there was a jump optimization pass after gcse and before loop,
5357 then we would not need to do this here, because jump would add the
5358 necessary REG_LABEL notes. */
5359
5360 static void
5361 add_label_notes (x, insn)
5362 rtx x;
5363 rtx insn;
5364 {
5365 enum rtx_code code = GET_CODE (x);
5366 int i, j;
5367 const char *fmt;
5368
5369 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
5370 {
5371 /* This code used to ignore labels that referred to dispatch tables to
5372 avoid flow generating (slightly) worse code.
5373
5374 We no longer ignore such label references (see LABEL_REF handling in
5375 mark_jump_label for additional information). */
5376
5377 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
5378 REG_NOTES (insn));
5379 if (LABEL_P (XEXP (x, 0)))
5380 LABEL_NUSES (XEXP (x, 0))++;
5381 return;
5382 }
5383
5384 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
5385 {
5386 if (fmt[i] == 'e')
5387 add_label_notes (XEXP (x, i), insn);
5388 else if (fmt[i] == 'E')
5389 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5390 add_label_notes (XVECEXP (x, i, j), insn);
5391 }
5392 }
5393
5394 /* Compute transparent outgoing information for each block.
5395
5396 An expression is transparent to an edge unless it is killed by
5397 the edge itself. This can only happen with abnormal control flow,
5398 when the edge is traversed through a call. This happens with
5399 non-local labels and exceptions.
5400
5401 This would not be necessary if we split the edge. While this is
5402 normally impossible for abnormal critical edges, with some effort
5403 it should be possible with exception handling, since we still have
5404 control over which handler should be invoked. But due to increased
5405 EH table sizes, this may not be worthwhile. */
5406
5407 static void
5408 compute_transpout ()
5409 {
5410 basic_block bb;
5411 unsigned int i;
5412 struct expr *expr;
5413
5414 sbitmap_vector_ones (transpout, last_basic_block);
5415
5416 FOR_EACH_BB (bb)
5417 {
5418 /* Note that flow inserted a nop at the end of basic blocks that
5419 end in call instructions for reasons other than abnormal
5420 control flow. */
5421 if (GET_CODE (bb->end) != CALL_INSN)
5422 continue;
5423
5424 for (i = 0; i < expr_hash_table_size; i++)
5425 for (expr = expr_hash_table[i]; expr ; expr = expr->next_same_hash)
5426 if (GET_CODE (expr->expr) == MEM)
5427 {
5428 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
5429 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
5430 continue;
5431
5432 /* ??? Optimally, we would use interprocedural alias
5433 analysis to determine if this mem is actually killed
5434 by this call. */
5435 RESET_BIT (transpout[bb->index], expr->bitmap_index);
5436 }
5437 }
5438 }
5439
5440 /* Removal of useless null pointer checks */
5441
5442 /* Called via note_stores. X is set by SETTER. If X is a register we must
5443 invalidate nonnull_local and set nonnull_killed. DATA is really a
5444 `null_pointer_info *'.
5445
5446 We ignore hard registers. */
5447
5448 static void
5449 invalidate_nonnull_info (x, setter, data)
5450 rtx x;
5451 rtx setter ATTRIBUTE_UNUSED;
5452 void *data;
5453 {
5454 unsigned int regno;
5455 struct null_pointer_info *npi = (struct null_pointer_info *) data;
5456
5457 while (GET_CODE (x) == SUBREG)
5458 x = SUBREG_REG (x);
5459
5460 /* Ignore anything that is not a register or is a hard register. */
5461 if (GET_CODE (x) != REG
5462 || REGNO (x) < npi->min_reg
5463 || REGNO (x) >= npi->max_reg)
5464 return;
5465
5466 regno = REGNO (x) - npi->min_reg;
5467
5468 RESET_BIT (npi->nonnull_local[npi->current_block->index], regno);
5469 SET_BIT (npi->nonnull_killed[npi->current_block->index], regno);
5470 }
5471
5472 /* Do null-pointer check elimination for the registers indicated in
5473 NPI. NONNULL_AVIN and NONNULL_AVOUT are pre-allocated sbitmaps;
5474 they are not our responsibility to free. */
5475
5476 static void
5477 delete_null_pointer_checks_1 (block_reg, nonnull_avin,
5478 nonnull_avout, npi)
5479 unsigned int *block_reg;
5480 sbitmap *nonnull_avin;
5481 sbitmap *nonnull_avout;
5482 struct null_pointer_info *npi;
5483 {
5484 basic_block bb, current_block;
5485 sbitmap *nonnull_local = npi->nonnull_local;
5486 sbitmap *nonnull_killed = npi->nonnull_killed;
5487
5488 /* Compute local properties, nonnull and killed. A register will have
5489 the nonnull property if at the end of the current block its value is
5490 known to be nonnull. The killed property indicates that somewhere in
5491 the block any information we had about the register is killed.
5492
5493 Note that a register can have both properties in a single block. That
5494 indicates that it's killed, then later in the block a new value is
5495 computed. */
5496 sbitmap_vector_zero (nonnull_local, last_basic_block);
5497 sbitmap_vector_zero (nonnull_killed, last_basic_block);
5498
5499 FOR_EACH_BB (current_block)
5500 {
5501 rtx insn, stop_insn;
5502
5503 /* Set the current block for invalidate_nonnull_info. */
5504 npi->current_block = current_block;
5505
5506 /* Scan each insn in the basic block looking for memory references and
5507 register sets. */
5508 stop_insn = NEXT_INSN (current_block->end);
5509 for (insn = current_block->head;
5510 insn != stop_insn;
5511 insn = NEXT_INSN (insn))
5512 {
5513 rtx set;
5514 rtx reg;
5515
5516 /* Ignore anything that is not a normal insn. */
5517 if (! INSN_P (insn))
5518 continue;
5519
5520 /* Basically ignore anything that is not a simple SET. We do have
5521 to make sure to invalidate nonnull_local and set nonnull_killed
5522 for such insns though. */
5523 set = single_set (insn);
5524 if (!set)
5525 {
5526 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5527 continue;
5528 }
5529
5530 /* See if we've got a usable memory load. We handle it first
5531 in case it uses its address register as a dest (which kills
5532 the nonnull property). */
5533 if (GET_CODE (SET_SRC (set)) == MEM
5534 && GET_CODE ((reg = XEXP (SET_SRC (set), 0))) == REG
5535 && REGNO (reg) >= npi->min_reg
5536 && REGNO (reg) < npi->max_reg)
5537 SET_BIT (nonnull_local[current_block->index],
5538 REGNO (reg) - npi->min_reg);
5539
5540 /* Now invalidate stuff clobbered by this insn. */
5541 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5542
5543 /* And handle stores; we do these last since any sets in INSN can
5544 not kill the nonnull property if it is derived from a MEM
5545 appearing in a SET_DEST. */
5546 if (GET_CODE (SET_DEST (set)) == MEM
5547 && GET_CODE ((reg = XEXP (SET_DEST (set), 0))) == REG
5548 && REGNO (reg) >= npi->min_reg
5549 && REGNO (reg) < npi->max_reg)
5550 SET_BIT (nonnull_local[current_block->index],
5551 REGNO (reg) - npi->min_reg);
5552 }
5553 }
5554
5555 /* Now compute global properties based on the local properties. This
5556 is a classic global availability algorithm. */
5557 compute_available (nonnull_local, nonnull_killed,
5558 nonnull_avout, nonnull_avin);
5559
5560 /* Now look at each bb and see if it ends with a compare of a value
5561 against zero. */
5562 FOR_EACH_BB (bb)
5563 {
5564 rtx last_insn = bb->end;
5565 rtx condition, earliest;
5566 int compare_and_branch;
5567
5568 /* Since MIN_REG is always at least FIRST_PSEUDO_REGISTER, and
5569 since BLOCK_REG[BB] is zero if this block did not end with a
5570 comparison against zero, this condition works. */
5571 if (block_reg[bb->index] < npi->min_reg
5572 || block_reg[bb->index] >= npi->max_reg)
5573 continue;
5574
5575 /* LAST_INSN is a conditional jump. Get its condition. */
5576 condition = get_condition (last_insn, &earliest);
5577
5578 /* If we can't determine the condition then skip. */
5579 if (! condition)
5580 continue;
5581
5582 /* Is the register known to have a nonzero value? */
5583 if (!TEST_BIT (nonnull_avout[bb->index], block_reg[bb->index] - npi->min_reg))
5584 continue;
5585
5586 /* Try to compute whether the compare/branch at the end of the block is one or
5587 two instructions. */
5588 if (earliest == last_insn)
5589 compare_and_branch = 1;
5590 else if (earliest == prev_nonnote_insn (last_insn))
5591 compare_and_branch = 2;
5592 else
5593 continue;
5594
5595 /* We know the register in this comparison is nonnull at exit from
5596 this block. We can optimize this comparison. */
5597 if (GET_CODE (condition) == NE)
5598 {
5599 rtx new_jump;
5600
5601 new_jump = emit_jump_insn_after (gen_jump (JUMP_LABEL (last_insn)),
5602 last_insn);
5603 JUMP_LABEL (new_jump) = JUMP_LABEL (last_insn);
5604 LABEL_NUSES (JUMP_LABEL (new_jump))++;
5605 emit_barrier_after (new_jump);
5606 }
5607
5608 delete_insn (last_insn);
5609 if (compare_and_branch == 2)
5610 delete_insn (earliest);
5611 purge_dead_edges (bb);
5612
5613 /* Don't check this block again. (Note that BLOCK_END is
5614 invalid here; we deleted the last instruction in the
5615 block.) */
5616 block_reg[bb->index] = 0;
5617 }
5618 }
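
/* A hedged source-level sketch of the transformation performed above
   (hypothetical example; the pass itself works on RTL):

        x = *p;               <- (mem (reg p)) makes P nonnull
        if (p != 0)           <- P is known nonnull here, so this
          goto keep;             test is statically true

   The conditional jump becomes an unconditional jump to KEEP (for an
   EQ comparison the branch is simply deleted and control falls
   through), and when the compare and the branch are two separate
   insns the dead compare is removed as well.  */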
5619
5620 /* Find EQ/NE comparisons against zero which can be (indirectly) evaluated
5621 at compile time.
5622
5623 This is conceptually similar to global constant/copy propagation and
5624 classic global CSE (it even uses the same dataflow equations as cprop).
5625
5626 If a register is used as a memory address with the form (mem (reg)), then we
5627 know that REG cannot be zero at that point in the program. Any instruction
5628 which sets REG "kills" this property.
5629
5630 So, if every path leading to a conditional branch has an available memory
5631 reference of that form, then we know the register cannot have the value
5632 zero at the conditional branch.
5633 
5634 So we merely need to compute the local properties and propagate that data
5635 around the cfg, then optimize where possible.
5636
5637 We run this pass two times. Once before CSE, then again after CSE. This
5638 has proven to be the most profitable approach. It is rare for new
5639 optimization opportunities of this nature to appear after the first CSE
5640 pass.
5641
5642 This could probably be integrated with global cprop with a little work. */
5643
5644 void
5645 delete_null_pointer_checks (f)
5646 rtx f ATTRIBUTE_UNUSED;
5647 {
5648 sbitmap *nonnull_avin, *nonnull_avout;
5649 unsigned int *block_reg;
5650 basic_block bb;
5651 int reg;
5652 int regs_per_pass;
5653 int max_reg;
5654 struct null_pointer_info npi;
5655
5656 /* If we have only a single block, then there's nothing to do. */
5657 if (n_basic_blocks <= 1)
5658 return;
5659
5660 /* Trying to perform global optimizations on flow graphs which have
5661 a high connectivity will take a long time and is unlikely to be
5662 particularly useful.
5663
5664 In normal circumstances a cfg should have about twice as many edges
5665 as blocks. But we do not want to punish small functions which have
5666 a couple switch statements. So we require a relatively large number
5667 of basic blocks and the ratio of edges to blocks to be high. */
5668 if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
5669 return;
5670
5671 /* We need four bitmaps, each with a bit for each register in each
5672 basic block. */
5673 max_reg = max_reg_num ();
5674 regs_per_pass = get_bitmap_width (4, last_basic_block, max_reg);
5675
5676 /* Allocate bitmaps to hold local and global properties. */
5677 npi.nonnull_local = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
5678 npi.nonnull_killed = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
5679 nonnull_avin = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
5680 nonnull_avout = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
5681
5682 /* Go through the basic blocks, seeing whether or not each block
5683 ends with a conditional branch whose condition is a comparison
5684 against zero. Record the register compared in BLOCK_REG. */
5685 block_reg = (unsigned int *) xcalloc (last_basic_block, sizeof (int));
5686 FOR_EACH_BB (bb)
5687 {
5688 rtx last_insn = bb->end;
5689 rtx condition, earliest, reg;
5690
5691 /* We only want conditional branches. */
5692 if (GET_CODE (last_insn) != JUMP_INSN
5693 || !any_condjump_p (last_insn)
5694 || !onlyjump_p (last_insn))
5695 continue;
5696
5697 /* LAST_INSN is a conditional jump. Get its condition. */
5698 condition = get_condition (last_insn, &earliest);
5699
5700 /* If we were unable to get the condition, or it is not an equality
5701 comparison against zero then there's nothing we can do. */
5702 if (!condition
5703 || (GET_CODE (condition) != NE && GET_CODE (condition) != EQ)
5704 || GET_CODE (XEXP (condition, 1)) != CONST_INT
5705 || (XEXP (condition, 1)
5706 != CONST0_RTX (GET_MODE (XEXP (condition, 0)))))
5707 continue;
5708
5709 /* We must be checking a register against zero. */
5710 reg = XEXP (condition, 0);
5711 if (GET_CODE (reg) != REG)
5712 continue;
5713
5714 block_reg[bb->index] = REGNO (reg);
5715 }
5716
5717 /* Go through the algorithm for each block of registers. */
5718 for (reg = FIRST_PSEUDO_REGISTER; reg < max_reg; reg += regs_per_pass)
5719 {
5720 npi.min_reg = reg;
5721 npi.max_reg = MIN (reg + regs_per_pass, max_reg);
5722 delete_null_pointer_checks_1 (block_reg, nonnull_avin,
5723 nonnull_avout, &npi);
5724 }
5725
5726 /* Free the table of registers compared at the end of every block. */
5727 free (block_reg);
5728
5729 /* Free bitmaps. */
5730 sbitmap_vector_free (npi.nonnull_local);
5731 sbitmap_vector_free (npi.nonnull_killed);
5732 sbitmap_vector_free (nonnull_avin);
5733 sbitmap_vector_free (nonnull_avout);
5734 }
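
/* A worked sketch of the register blocking above (illustrative numbers
   only; the actual width comes from get_bitmap_width and depends on
   available memory): if FIRST_PSEUDO_REGISTER is 64, max_reg_num ()
   returns 10064 and regs_per_pass comes back as 2500, then the loop
   above solves the dataflow problem four times, with [min_reg, max_reg)
   windows

        [64, 2564)  [2564, 5064)  [5064, 7564)  [7564, 10064)

   so the four bitmap vectors never cover more than 2500 registers per
   basic block at once.  */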
5735
5736 /* Code Hoisting variables and subroutines. */
5737
5738 /* Very busy expressions. */
5739 static sbitmap *hoist_vbein;
5740 static sbitmap *hoist_vbeout;
5741
5742 /* Hoistable expressions. */
5743 static sbitmap *hoist_exprs;
5744
5745 /* Dominator bitmaps. */
5746 static sbitmap *dominators;
5747
5748 /* ??? We could compute post dominators and run this algorithm in
5749 reverse to perform tail merging; doing so would probably be
5750 more effective than the tail merging code in jump.c.
5751
5752 It's unclear if tail merging could be run in parallel with
5753 code hoisting. It would be nice. */
5754
5755 /* Allocate vars used for code hoisting analysis. */
5756
5757 static void
5758 alloc_code_hoist_mem (n_blocks, n_exprs)
5759 int n_blocks, n_exprs;
5760 {
5761 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
5762 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
5763 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
5764
5765 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
5766 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
5767 hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
5768 transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
5769
5770 dominators = sbitmap_vector_alloc (n_blocks, n_blocks);
5771 }
5772
5773 /* Free vars used for code hoisting analysis. */
5774
5775 static void
5776 free_code_hoist_mem ()
5777 {
5778 sbitmap_vector_free (antloc);
5779 sbitmap_vector_free (transp);
5780 sbitmap_vector_free (comp);
5781
5782 sbitmap_vector_free (hoist_vbein);
5783 sbitmap_vector_free (hoist_vbeout);
5784 sbitmap_vector_free (hoist_exprs);
5785 sbitmap_vector_free (transpout);
5786
5787 sbitmap_vector_free (dominators);
5788 }
5789
5790 /* Compute the very busy expressions at entry/exit from each block.
5791
5792 An expression is very busy if all paths from a given point
5793 compute the expression. */
5794
5795 static void
5796 compute_code_hoist_vbeinout ()
5797 {
5798 int changed, passes;
5799 basic_block bb;
5800
5801 sbitmap_vector_zero (hoist_vbeout, last_basic_block);
5802 sbitmap_vector_zero (hoist_vbein, last_basic_block);
5803
5804 passes = 0;
5805 changed = 1;
5806
5807 while (changed)
5808 {
5809 changed = 0;
5810
5811 /* We scan the blocks in the reverse order to speed up
5812 the convergence. */
5813 FOR_EACH_BB_REVERSE (bb)
5814 {
5815 changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index], antloc[bb->index],
5816 hoist_vbeout[bb->index], transp[bb->index]);
5817 if (bb->next_bb != EXIT_BLOCK_PTR)
5818 sbitmap_intersection_of_succs (hoist_vbeout[bb->index], hoist_vbein, bb->index);
5819 }
5820
5821 passes++;
5822 }
5823
5824 if (gcse_file)
5825 fprintf (gcse_file, "hoisting vbeinout computation: %d passes\n", passes);
5826 }
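
/* For reference, the fixed point computed above corresponds to the
   classic very-busy-expression equations; this sketch merely restates
   the code in conventional dataflow notation:

        VBEIN(b)  = ANTLOC(b)  U  (VBEOUT(b) & TRANSP(b))
        VBEOUT(b) = intersection of VBEIN(s) over all successors s

   where VBEOUT is left empty for the block laid out just before the
   exit block, and the system is iterated until no bit vector
   changes.  */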
5827
5828 /* Top level routine to do the dataflow analysis needed by code hoisting. */
5829
5830 static void
5831 compute_code_hoist_data ()
5832 {
5833 compute_local_properties (transp, comp, antloc, 0);
5834 compute_transpout ();
5835 compute_code_hoist_vbeinout ();
5836 calculate_dominance_info (NULL, dominators, CDI_DOMINATORS);
5837 if (gcse_file)
5838 fprintf (gcse_file, "\n");
5839 }
5840
5841 /* Determine if the expression identified by EXPR_INDEX would
5842 reach BB unimpaired if it was placed at the end of EXPR_BB.
5843
5844 It's unclear exactly what Muchnick meant by "unimpaired". It seems
5845 to me that the expression must either be computed or transparent in
5846 *every* block in the path(s) from EXPR_BB to BB. Any other definition
5847 would allow the expression to be hoisted out of loops, even if
5848 the expression wasn't a loop invariant.
5849
5850 Contrast this to reachability for PRE where an expression is
5851 considered reachable if *any* path reaches instead of *all*
5852 paths. */
5853
5854 static int
5855 hoist_expr_reaches_here_p (expr_bb, expr_index, bb, visited)
5856 basic_block expr_bb;
5857 int expr_index;
5858 basic_block bb;
5859 char *visited;
5860 {
5861 edge pred;
5862 int visited_allocated_locally = 0;
5863
5864
5865 if (visited == NULL)
5866 {
5867 visited_allocated_locally = 1;
5868 visited = xcalloc (last_basic_block, 1);
5869 }
5870
5871 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
5872 {
5873 basic_block pred_bb = pred->src;
5874
5875 if (pred->src == ENTRY_BLOCK_PTR)
5876 break;
/* Stop when the walk reaches the block the expression would be
hoisted to; the expression survives along this path. */
else if (pred_bb == expr_bb)
continue;
5877 else if (visited[pred_bb->index])
5878 continue;
5879
5880 /* Does this predecessor generate this expression? */
5881 else if (TEST_BIT (comp[pred_bb->index], expr_index))
5882 break;
5883 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
5884 break;
5885
5886 /* Not killed. */
5887 else
5888 {
5889 visited[pred_bb->index] = 1;
5890 if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
5891 pred_bb, visited))
5892 break;
5893 }
5894 }
5895 if (visited_allocated_locally)
5896 free (visited);
5897
5898 return (pred == NULL);
5899 }
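
/* A small illustration of the all-paths requirement (hypothetical
   CFG, assuming EXPR_BB dominates BB).  Suppose BB has two incoming
   paths from EXPR_BB:

        EXPR_BB --> B1 --> BB
        EXPR_BB --> B2 --> BB

   Walking backward from BB, the path through B1 succeeds only if the
   expression is transparent, and not recomputed, everywhere between
   EXPR_BB and BB.  If B2 kills or recomputes the expression, the walk
   breaks out and the function returns 0, so the expression is not
   considered to reach BB unimpaired.  */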
5900 \f
5901 /* Actually perform code hoisting. */
5902
5903 static void
5904 hoist_code ()
5905 {
5906 basic_block bb, dominated;
5907 unsigned int i;
5908 struct expr **index_map;
5909 struct expr *expr;
5910
5911 sbitmap_vector_zero (hoist_exprs, last_basic_block);
5912
5913 /* Compute a mapping from expression number (`bitmap_index') to
5914 hash table entry. */
5915
5916 index_map = (struct expr **) xcalloc (n_exprs, sizeof (struct expr *));
5917 for (i = 0; i < expr_hash_table_size; i++)
5918 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
5919 index_map[expr->bitmap_index] = expr;
5920
5921 /* Walk over each basic block looking for potentially hoistable
5922 expressions, nothing gets hoisted from the entry block. */
5923 FOR_EACH_BB (bb)
5924 {
5925 int found = 0;
5926 int insn_inserted_p;
5927
5928 /* Examine each expression that is very busy at the exit of this
5929 block. These are the potentially hoistable expressions. */
5930 for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
5931 {
5932 int hoistable = 0;
5933
5934 if (TEST_BIT (hoist_vbeout[bb->index], i) && TEST_BIT (transpout[bb->index], i))
5935 {
5936 /* We've found a potentially hoistable expression, now
5937 we look at every block BB dominates to see if it
5938 computes the expression. */
5939 FOR_EACH_BB (dominated)
5940 {
5941 /* Ignore self dominance. */
5942 if (bb == dominated
5943 || ! TEST_BIT (dominators[dominated->index], bb->index))
5944 continue;
5945
5946 /* We've found a dominated block, now see if it computes
5947 the busy expression and whether or not moving that
5948 expression to the "beginning" of that block is safe. */
5949 if (!TEST_BIT (antloc[dominated->index], i))
5950 continue;
5951
5952 /* Note if the expression would reach the dominated block
5953 unimpaired if it was placed at the end of BB.
5954
5955 Keep track of how many times this expression is hoistable
5956 from a dominated block into BB. */
5957 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
5958 hoistable++;
5959 }
5960
5961 /* If we found more than one hoistable occurrence of this
5962 expression, then note it in the bitmap of expressions to
5963 hoist. It makes no sense to hoist things which are computed
5964 in only one BB, and doing so tends to pessimize register
5965 allocation. One could increase this value to try harder
5966 to avoid any possible code expansion due to register
5967 allocation issues; however experiments have shown that
5968 the vast majority of hoistable expressions are only movable
5969 from two successors, so raising this threshold is likely
5970 to nullify any benefit we get from code hoisting. */
5971 if (hoistable > 1)
5972 {
5973 SET_BIT (hoist_exprs[bb->index], i);
5974 found = 1;
5975 }
5976 }
5977 }
5978
5979 /* If we found nothing to hoist, then quit now. */
5980 if (! found)
5981 continue;
5982
5983 /* Loop over all the hoistable expressions. */
5984 for (i = 0; i < hoist_exprs[bb->index]->n_bits; i++)
5985 {
5986 /* We want to insert the expression into BB only once, so
5987 note when we've inserted it. */
5988 insn_inserted_p = 0;
5989
5990 /* These tests should be the same as the tests above. */
5991 if (TEST_BIT (hoist_vbeout[bb->index], i))
5992 {
5993 /* We've found a potentially hoistable expression, now
5994 we look at every block BB dominates to see if it
5995 computes the expression. */
5996 FOR_EACH_BB (dominated)
5997 {
5998 /* Ignore self dominance. */
5999 if (bb == dominated
6000 || ! TEST_BIT (dominators[dominated->index], bb->index))
6001 continue;
6002
6003 /* We've found a dominated block, now see if it computes
6004 the busy expression and whether or not moving that
6005 expression to the "beginning" of that block is safe. */
6006 if (!TEST_BIT (antloc[dominated->index], i))
6007 continue;
6008
6009 /* The expression is computed in the dominated block and
6010 it would be safe to compute it at the start of the
6011 dominated block. Now we have to determine if the
6012 expression would reach the dominated block if it was
6013 placed at the end of BB. */
6014 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
6015 {
6016 struct expr *expr = index_map[i];
6017 struct occr *occr = expr->antic_occr;
6018 rtx insn;
6019 rtx set;
6020
6021 /* Find the right occurrence of this expression. */
6022 while (occr && BLOCK_FOR_INSN (occr->insn) != dominated)
6023 occr = occr->next;
6024
6025 /* Should never happen. */
6026 if (!occr)
6027 abort ();
6028
6029 insn = occr->insn;
6030
6031 set = single_set (insn);
6032 if (! set)
6033 abort ();
6034
6035 /* Create a pseudo-reg to store the result of reaching
6036 expressions into. Get the mode for the new pseudo
6037 from the mode of the original destination pseudo. */
6038 if (expr->reaching_reg == NULL)
6039 expr->reaching_reg
6040 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
6041
6042 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
6043 delete_insn (insn);
6044 occr->deleted_p = 1;
6045 if (!insn_inserted_p)
6046 {
6047 insert_insn_end_bb (index_map[i], bb, 0);
6048 insn_inserted_p = 1;
6049 }
6050 }
6051 }
6052 }
6053 }
6054 }
6055
6056 free (index_map);
6057 }
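
/* A hedged source-level sketch of the effect of hoist_code
   (hypothetical example): given

        if (c)
          x = a + b;
        else
          y = a + b;

   A + B is very busy at the end of the dominating block, is computed
   in two dominated blocks, and reaches both unimpaired, so it is
   evaluated once into a new pseudo in the dominator:

        t = a + b;
        if (c)
          x = t;
        else
          y = t;  */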
6058
6059 /* Top level routine to perform one code hoisting (aka unification) pass
6060
6061 Return non-zero if a change was made. */
6062
6063 static int
6064 one_code_hoisting_pass ()
6065 {
6066 int changed = 0;
6067
6068 alloc_expr_hash_table (max_cuid);
6069 compute_expr_hash_table ();
6070 if (gcse_file)
6071 dump_hash_table (gcse_file, "Code Hoisting Expressions", expr_hash_table,
6072 expr_hash_table_size, n_exprs);
6073
6074 if (n_exprs > 0)
6075 {
6076 alloc_code_hoist_mem (last_basic_block, n_exprs);
6077 compute_code_hoist_data ();
6078 hoist_code ();
6079 free_code_hoist_mem ();
6080 }
6081
6082 free_expr_hash_table ();
6083
6084 return changed;
6085 }
6086 \f
6087 /* Here we provide the things required to do store motion towards
6088 the exit. In order for this to be effective, gcse also needed to
6089 be taught how to move a load when it is killed only by a store to itself.
6090
6091 int i;
6092 float a[10];
6093
6094 void foo(float scale)
6095 {
6096 for (i=0; i<10; i++)
6097 a[i] *= scale;
6098 }
6099
6100 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
6101 the load out since it's live around the loop, and stored at the bottom
6102 of the loop.
6103
6104 The 'Load Motion' referred to and implemented in this file is
6105 an enhancement to gcse which, when using edge based lcm, recognizes
6106 this situation and allows gcse to move the load out of the loop.
6107
6108 Once gcse has hoisted the load, store motion can then push this
6109 load towards the exit, and we end up with no loads or stores of 'i'
6110 in the loop. */
6111
6112 /* This will search the ldst list for a matching expression. If it
6113 doesn't find one, we create one and initialize it. */
6114
6115 static struct ls_expr *
6116 ldst_entry (x)
6117 rtx x;
6118 {
6119 struct ls_expr * ptr;
6120
6121 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
6122 if (expr_equiv_p (ptr->pattern, x))
6123 break;
6124
6125 if (!ptr)
6126 {
6127 ptr = (struct ls_expr *) xmalloc (sizeof (struct ls_expr));
6128
6129 ptr->next = pre_ldst_mems;
6130 ptr->expr = NULL;
6131 ptr->pattern = x;
6132 ptr->loads = NULL_RTX;
6133 ptr->stores = NULL_RTX;
6134 ptr->reaching_reg = NULL_RTX;
6135 ptr->invalid = 0;
6136 ptr->index = 0;
6137 ptr->hash_index = 0;
6138 pre_ldst_mems = ptr;
6139 }
6140
6141 return ptr;
6142 }
6143
6144 /* Free up an individual ldst entry. */
6145
6146 static void
6147 free_ldst_entry (ptr)
6148 struct ls_expr * ptr;
6149 {
6150 free_INSN_LIST_list (& ptr->loads);
6151 free_INSN_LIST_list (& ptr->stores);
6152
6153 free (ptr);
6154 }
6155
6156 /* Free up all memory associated with the ldst list. */
6157
6158 static void
6159 free_ldst_mems ()
6160 {
6161 while (pre_ldst_mems)
6162 {
6163 struct ls_expr * tmp = pre_ldst_mems;
6164
6165 pre_ldst_mems = pre_ldst_mems->next;
6166
6167 free_ldst_entry (tmp);
6168 }
6169
6170 pre_ldst_mems = NULL;
6171 }
6172
6173 /* Dump debugging info about the ldst list. */
6174
6175 static void
6176 print_ldst_list (file)
6177 FILE * file;
6178 {
6179 struct ls_expr * ptr;
6180
6181 fprintf (file, "LDST list: \n");
6182
6183 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
6184 {
6185 fprintf (file, " Pattern (%3d): ", ptr->index);
6186
6187 print_rtl (file, ptr->pattern);
6188
6189 fprintf (file, "\n Loads : ");
6190
6191 if (ptr->loads)
6192 print_rtl (file, ptr->loads);
6193 else
6194 fprintf (file, "(nil)");
6195
6196 fprintf (file, "\n Stores : ");
6197
6198 if (ptr->stores)
6199 print_rtl (file, ptr->stores);
6200 else
6201 fprintf (file, "(nil)");
6202
6203 fprintf (file, "\n\n");
6204 }
6205
6206 fprintf (file, "\n");
6207 }
6208
6209 /* Return the entry for X in the list of ldst only expressions, or
NULL if there is no valid entry for it. */
6210
6211 static struct ls_expr *
6212 find_rtx_in_ldst (x)
6213 rtx x;
6214 {
6215 struct ls_expr * ptr;
6216
6217 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
6218 if (expr_equiv_p (ptr->pattern, x) && ! ptr->invalid)
6219 return ptr;
6220
6221 return NULL;
6222 }
6223
6224 /* Assign each element of the list of mems a monotonically increasing value. */
6225
6226 static int
6227 enumerate_ldsts ()
6228 {
6229 struct ls_expr * ptr;
6230 int n = 0;
6231
6232 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
6233 ptr->index = n++;
6234
6235 return n;
6236 }
6237
6238 /* Return first item in the list. */
6239
6240 static inline struct ls_expr *
6241 first_ls_expr ()
6242 {
6243 return pre_ldst_mems;
6244 }
6245
6246 /* Return the next item in the list after the specified one. */
6247
6248 static inline struct ls_expr *
6249 next_ls_expr (ptr)
6250 struct ls_expr * ptr;
6251 {
6252 return ptr->next;
6253 }
6254 \f
6255 /* Load Motion for loads which only kill themselves. */
6256
6257 /* Return true if x is a simple MEM operation, with no registers or
6258 side effects. These are the types of loads we consider for the
6259 ld_motion list, otherwise we let the usual aliasing take care of it. */
6260
6261 static int
6262 simple_mem (x)
6263 rtx x;
6264 {
6265 if (GET_CODE (x) != MEM)
6266 return 0;
6267
6268 if (MEM_VOLATILE_P (x))
6269 return 0;
6270
6271 if (GET_MODE (x) == BLKmode)
6272 return 0;
6273
6274 if (!rtx_varies_p (XEXP (x, 0), 0))
6275 return 1;
6276
6277 return 0;
6278 }
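
/* Hedged examples of what the predicate above accepts (illustrative
   RTL fragments only):

        (mem:SI (symbol_ref ("a")))                   simple
        (mem:SI (const (plus (symbol_ref ("a"))
                             (const_int 4))))         simple
        (mem:SI (reg 58))                             not simple: the
                                                      address can vary
        (mem:BLK (symbol_ref ("a")))                  not simple: BLKmode
        volatile accesses                             not simple  */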
6279
6280 /* Make sure there isn't a buried reference in this pattern anywhere.
6281 If there is, invalidate the entry for it since we're not capable
6282 of fixing it up just yet. We have to be sure we know about ALL
6283 loads since the aliasing code will allow all entries in the
6284 ld_motion list to not-alias one another. If we miss a load, we will get
6285 the wrong value since gcse might common it and we won't know to
6286 fix it up. */
6287
6288 static void
6289 invalidate_any_buried_refs (x)
6290 rtx x;
6291 {
6292 const char * fmt;
6293 int i, j;
6294 struct ls_expr * ptr;
6295
6296 /* Invalidate it in the list. */
6297 if (GET_CODE (x) == MEM && simple_mem (x))
6298 {
6299 ptr = ldst_entry (x);
6300 ptr->invalid = 1;
6301 }
6302
6303 /* Recursively process the insn. */
6304 fmt = GET_RTX_FORMAT (GET_CODE (x));
6305
6306 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6307 {
6308 if (fmt[i] == 'e')
6309 invalidate_any_buried_refs (XEXP (x, i));
6310 else if (fmt[i] == 'E')
6311 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6312 invalidate_any_buried_refs (XVECEXP (x, i, j));
6313 }
6314 }
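
/* A hedged example of a "buried" reference (illustrative RTL): in

        (set (reg 70) (plus:SI (mem:SI (symbol_ref ("a")))
                               (const_int 1)))

   the MEM is not a plain load into a register, so the walk above finds
   it inside the PLUS and marks its ldst entry invalid, rather than
   risking a miscompilation if gcse were to common the expression.  */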
6315
6316 /* Find all the 'simple' MEMs which are used in LOADs and STORES. Simple
6317 being defined as MEM loads and stores to symbols, with no
6318 side effects and no registers in the expression. If there are any
6319 uses/defs which don't match these criteria, the entry is invalidated and
6320 trimmed out later. */
6321
6322 static void
6323 compute_ld_motion_mems ()
6324 {
6325 struct ls_expr * ptr;
6326 basic_block bb;
6327 rtx insn;
6328
6329 pre_ldst_mems = NULL;
6330
6331 FOR_EACH_BB (bb)
6332 {
6333 for (insn = bb->head;
6334 insn && insn != NEXT_INSN (bb->end);
6335 insn = NEXT_INSN (insn))
6336 {
6337 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
6338 {
6339 if (GET_CODE (PATTERN (insn)) == SET)
6340 {
6341 rtx src = SET_SRC (PATTERN (insn));
6342 rtx dest = SET_DEST (PATTERN (insn));
6343
6344 /* Check for a simple LOAD... */
6345 if (GET_CODE (src) == MEM && simple_mem (src))
6346 {
6347 ptr = ldst_entry (src);
6348 if (GET_CODE (dest) == REG)
6349 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
6350 else
6351 ptr->invalid = 1;
6352 }
6353 else
6354 {
6355 /* Make sure there isn't a buried load somewhere. */
6356 invalidate_any_buried_refs (src);
6357 }
6358
6359 /* Check for stores. Don't worry about aliased ones, they
6360 will block any movement we might do later. We only care
6361 about this exact pattern since those are the only
6362 circumstance that we will ignore the aliasing info. */
6363 if (GET_CODE (dest) == MEM && simple_mem (dest))
6364 {
6365 ptr = ldst_entry (dest);
6366
6367 if (GET_CODE (src) != MEM
6368 && GET_CODE (src) != ASM_OPERANDS)
6369 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
6370 else
6371 ptr->invalid = 1;
6372 }
6373 }
6374 else
6375 invalidate_any_buried_refs (PATTERN (insn));
6376 }
6377 }
6378 }
6379 }
6380
6381 /* Remove any references that have been either invalidated or are not in the
6382 expression list for pre gcse. */
6383
6384 static void
6385 trim_ld_motion_mems ()
6386 {
6387 struct ls_expr * last = NULL;
6388 struct ls_expr * ptr = first_ls_expr ();
6389
6390 while (ptr != NULL)
6391 {
6392 int del = ptr->invalid;
6393 struct expr * expr = NULL;
6394
6395 /* Delete if entry has been made invalid. */
6396 if (!del)
6397 {
6398 unsigned int i;
6399
6400 del = 1;
6401 /* Delete if we cannot find this mem in the expression list. */
6402 for (i = 0; i < expr_hash_table_size && del; i++)
6403 {
6404 for (expr = expr_hash_table[i];
6405 expr != NULL;
6406 expr = expr->next_same_hash)
6407 if (expr_equiv_p (expr->expr, ptr->pattern))
6408 {
6409 del = 0;
6410 break;
6411 }
6412 }
6413 }
6414
6415 if (del)
6416 {
6417 if (last != NULL)
6418 {
6419 last->next = ptr->next;
6420 free_ldst_entry (ptr);
6421 ptr = last->next;
6422 }
6423 else
6424 {
6425 pre_ldst_mems = pre_ldst_mems->next;
6426 free_ldst_entry (ptr);
6427 ptr = pre_ldst_mems;
6428 }
6429 }
6430 else
6431 {
6432 /* Set the expression field if we are keeping it. */
6433 last = ptr;
6434 ptr->expr = expr;
6435 ptr = ptr->next;
6436 }
6437 }
6438
6439 /* Show the world what we've found. */
6440 if (gcse_file && pre_ldst_mems != NULL)
6441 print_ldst_list (gcse_file);
6442 }
6443
6444 /* This routine will take an expression which we are replacing with
6445 a reaching register, and update any stores that are needed if
6446 that expression is in the ld_motion list. Stores are updated by
6447 copying their SRC to the reaching register, and then storing
6448 the reaching register into the store location. This keeps the
6449 correct value in the reaching register for the loads. */
6450
6451 static void
6452 update_ld_motion_stores (expr)
6453 struct expr * expr;
6454 {
6455 struct ls_expr * mem_ptr;
6456
6457 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
6458 {
6459 /* We can try to find just the REACHED stores, but it shouldn't
6460 matter to set the reaching reg everywhere... some might be
6461 dead and should be eliminated later. */
6462
6463 /* We replace SET mem = expr with
6464 SET reg = expr
6465 SET mem = reg , where reg is the
6466 reaching reg used in the load. */
6467 rtx list = mem_ptr->stores;
6468
6469 for ( ; list != NULL_RTX; list = XEXP (list, 1))
6470 {
6471 rtx insn = XEXP (list, 0);
6472 rtx pat = PATTERN (insn);
6473 rtx src = SET_SRC (pat);
6474 rtx reg = expr->reaching_reg;
6475 rtx copy, new;
6476
6477 /* If we've already copied it, continue. */
6478 if (expr->reaching_reg == src)
6479 continue;
6480
6481 if (gcse_file)
6482 {
6483 fprintf (gcse_file, "PRE: store updated with reaching reg ");
6484 print_rtl (gcse_file, expr->reaching_reg);
6485 fprintf (gcse_file, ":\n ");
6486 print_inline_rtx (gcse_file, insn, 8);
6487 fprintf (gcse_file, "\n");
6488 }
6489
6490 copy = gen_move_insn ( reg, SET_SRC (pat));
6491 new = emit_insn_before (copy, insn);
6492 record_one_set (REGNO (reg), new);
6493 SET_SRC (pat) = reg;
6494
6495 /* Un-recognize this pattern since it's probably different now. */
6496 INSN_CODE (insn) = -1;
6497 gcse_create_count++;
6498 }
6499 }
6500 }
6501 \f
6502 /* Store motion code. */
6503
6504 /* This is used to communicate the target bitvector we want to use in the
6505 reg_set_info routine when called via the note_stores mechanism. */
6506 static sbitmap * regvec;
6507
6508 /* Used in computing the reverse edge graph bit vectors. */
6509 static sbitmap * st_antloc;
6510
6511 /* Global holding the number of store expressions we are dealing with. */
6512 static int num_stores;
6513
6514 /* Record in the current target bitvector that a register has been set. Called from note_stores. */
6515
6516 static void
6517 reg_set_info (dest, setter, data)
6518 rtx dest, setter ATTRIBUTE_UNUSED;
6519 void * data ATTRIBUTE_UNUSED;
6520 {
6521 if (GET_CODE (dest) == SUBREG)
6522 dest = SUBREG_REG (dest);
6523
6524 if (GET_CODE (dest) == REG)
6525 SET_BIT (*regvec, REGNO (dest));
6526 }
6527
6528 /* Return non-zero if the register operands of expression X are not
6529 killed anywhere in basic block BB. */
6530
6531 static int
6532 store_ops_ok (x, bb)
6533 rtx x;
6534 basic_block bb;
6535 {
6536 int i;
6537 enum rtx_code code;
6538 const char * fmt;
6539
6540 /* Repeat is used to turn tail-recursion into iteration. */
6541 repeat:
6542
6543 if (x == 0)
6544 return 1;
6545
6546 code = GET_CODE (x);
6547 switch (code)
6548 {
6549 case REG:
6550 /* If a reg has changed anywhere in this block, the operand
6551 has been killed, so it is not OK. */
6552 return ! TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
6553
6554 case MEM:
6555 x = XEXP (x, 0);
6556 goto repeat;
6557
6558 case PRE_DEC:
6559 case PRE_INC:
6560 case POST_DEC:
6561 case POST_INC:
6562 return 0;
6563
6564 case PC:
6565 case CC0: /*FIXME*/
6566 case CONST:
6567 case CONST_INT:
6568 case CONST_DOUBLE:
6569 case CONST_VECTOR:
6570 case SYMBOL_REF:
6571 case LABEL_REF:
6572 case ADDR_VEC:
6573 case ADDR_DIFF_VEC:
6574 return 1;
6575
6576 default:
6577 break;
6578 }
6579
6580 i = GET_RTX_LENGTH (code) - 1;
6581 fmt = GET_RTX_FORMAT (code);
6582
6583 for (; i >= 0; i--)
6584 {
6585 if (fmt[i] == 'e')
6586 {
6587 rtx tem = XEXP (x, i);
6588
6589 /* If we are about to do the last recursive call
6590 needed at this level, change it into iteration.
6591 This function is called enough to be worth it. */
6592 if (i == 0)
6593 {
6594 x = tem;
6595 goto repeat;
6596 }
6597
6598 if (! store_ops_ok (tem, bb))
6599 return 0;
6600 }
6601 else if (fmt[i] == 'E')
6602 {
6603 int j;
6604
6605 for (j = 0; j < XVECLEN (x, i); j++)
6606 {
6607 if (! store_ops_ok (XVECEXP (x, i, j), bb))
6608 return 0;
6609 }
6610 }
6611 }
6612
6613 return 1;
6614 }
6615
6616 /* Determine whether INSN is a MEM store pattern that we will consider moving. */
6617
6618 static void
6619 find_moveable_store (insn)
6620 rtx insn;
6621 {
6622 struct ls_expr * ptr;
6623 rtx dest = PATTERN (insn);
6624
6625 if (GET_CODE (dest) != SET
6626 || GET_CODE (SET_SRC (dest)) == ASM_OPERANDS)
6627 return;
6628
6629 dest = SET_DEST (dest);
6630
6631 if (GET_CODE (dest) != MEM || MEM_VOLATILE_P (dest)
6632 || GET_MODE (dest) == BLKmode)
6633 return;
6634
6635 if (GET_CODE (XEXP (dest, 0)) != SYMBOL_REF)
6636 return;
6637
6638 if (rtx_varies_p (XEXP (dest, 0), 0))
6639 return;
6640
6641 ptr = ldst_entry (dest);
6642 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
6643 }
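
/* Hedged source-level examples of what find_moveable_store accepts
   (illustrative only): a store to a fixed symbol such as

        a = x;      ->  (set (mem (symbol_ref ("a"))) (reg x))

   is a candidate, while stores through pointers or to volatile or
   BLKmode locations, e.g.

        *p = x;     ->  the address is a REG, not a SYMBOL_REF

   are rejected here and left to the usual aliasing machinery.  */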
6644
6645 /* Build the table of potentially movable stores, recording the registers
6646 set in each block along the way. Returns the number of stores found. */
6647
6648 static int
6649 compute_store_table ()
6650 {
6651 int ret;
6652 basic_block bb;
6653 unsigned regno;
6654 rtx insn, pat;
6655
6656 max_gcse_regno = max_reg_num ();
6657
6658 reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (last_basic_block,
6659 max_gcse_regno);
6660 sbitmap_vector_zero (reg_set_in_block, last_basic_block);
6661 pre_ldst_mems = 0;
6662
6663 /* Find all the stores we care about. */
6664 FOR_EACH_BB (bb)
6665 {
6666 regvec = & (reg_set_in_block[bb->index]);
6667 for (insn = bb->end;
6668 insn && insn != PREV_INSN (bb->head);
6669 insn = PREV_INSN (insn))
6670 {
6671 /* Ignore anything that is not a normal insn. */
6672 if (! INSN_P (insn))
6673 continue;
6674
6675 if (GET_CODE (insn) == CALL_INSN)
6676 {
6677 bool clobbers_all = false;
6678 #ifdef NON_SAVING_SETJMP
6679 if (NON_SAVING_SETJMP
6680 && find_reg_note (insn, REG_SETJMP, NULL_RTX))
6681 clobbers_all = true;
6682 #endif
6683
6684 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6685 if (clobbers_all
6686 || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
6687 SET_BIT (reg_set_in_block[bb->index], regno);
6688 }
6689
6690 pat = PATTERN (insn);
6691 note_stores (pat, reg_set_info, NULL);
6692
6693 /* Now that we've marked regs, look for stores. */
6694 if (GET_CODE (pat) == SET)
6695 find_moveable_store (insn);
6696 }
6697 }
6698
6699 ret = enumerate_ldsts ();
6700
6701 if (gcse_file)
6702 {
6703 fprintf (gcse_file, "Store Motion Expressions.\n");
6704 print_ldst_list (gcse_file);
6705 }
6706
6707 return ret;
6708 }
6709
6710 /* Check to see if the load X is aliased with STORE_PATTERN. */
6711
6712 static int
6713 load_kills_store (x, store_pattern)
6714 rtx x, store_pattern;
6715 {
6716 if (true_dependence (x, GET_MODE (x), store_pattern, rtx_addr_varies_p))
6717 return 1;
6718 return 0;
6719 }
6720
6721 /* Go through the entire insn X, looking for any loads which might alias
6722 STORE_PATTERN. Return 1 if found. */
6723
6724 static int
6725 find_loads (x, store_pattern)
6726 rtx x, store_pattern;
6727 {
6728 const char * fmt;
6729 int i, j;
6730 int ret = 0;
6731
6732 if (!x)
6733 return 0;
6734
6735 if (GET_CODE (x) == SET)
6736 x = SET_SRC (x);
6737
6738 if (GET_CODE (x) == MEM)
6739 {
6740 if (load_kills_store (x, store_pattern))
6741 return 1;
6742 }
6743
6744 /* Recursively process the insn. */
6745 fmt = GET_RTX_FORMAT (GET_CODE (x));
6746
6747 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0 && !ret; i--)
6748 {
6749 if (fmt[i] == 'e')
6750 ret |= find_loads (XEXP (x, i), store_pattern);
6751 else if (fmt[i] == 'E')
6752 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6753 ret |= find_loads (XVECEXP (x, i, j), store_pattern);
6754 }
6755 return ret;
6756 }
6757
6758 /* Check if INSN kills the store pattern X (is aliased with it).
6759 Return 1 if it does. */
6760
6761 static int
6762 store_killed_in_insn (x, insn)
6763 rtx x, insn;
6764 {
6765 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
6766 return 0;
6767
6768 if (GET_CODE (insn) == CALL_INSN)
6769 {
6770 /* A normal or pure call might read from pattern,
6771 but a const call will not. */
6772 return ! CONST_OR_PURE_CALL_P (insn) || pure_call_p (insn);
6773 }
6774
6775 if (GET_CODE (PATTERN (insn)) == SET)
6776 {
6777 rtx pat = PATTERN (insn);
6778 /* Check for memory stores to aliased objects. */
6779 if (GET_CODE (SET_DEST (pat)) == MEM && !expr_equiv_p (SET_DEST (pat), x))
6780 /* Pretend it's a load and check for aliasing. */
6781 if (find_loads (SET_DEST (pat), x))
6782 return 1;
6783 return find_loads (SET_SRC (pat), x);
6784 }
6785 else
6786 return find_loads (PATTERN (insn), x);
6787 }
6788
6789 /* Returns 1 if the expression X is loaded or clobbered on or after INSN
6790 within basic block BB. */
6791
6792 static int
6793 store_killed_after (x, insn, bb)
6794 rtx x, insn;
6795 basic_block bb;
6796 {
6797 rtx last = bb->end;
6798
6799 if (insn == last)
6800 return 0;
6801
6802 /* Check if the register operands of the store are OK in this block.
6803 Note that if registers are changed ANYWHERE in the block, we'll
6804 decide we can't move it, regardless of whether it changed above
6805 or below the store. This could be improved by checking the register
6806 operands while looking for aliasing in each insn. */
6807 if (!store_ops_ok (XEXP (x, 0), bb))
6808 return 1;
6809
6810 for ( ; insn && insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
6811 if (store_killed_in_insn (x, insn))
6812 return 1;
6813
6814 return 0;
6815 }
6816
6817 /* Returns 1 if the expression X is loaded or clobbered on or before INSN
6818 within basic block BB. */
6819 static int
6820 store_killed_before (x, insn, bb)
6821 rtx x, insn;
6822 basic_block bb;
6823 {
6824 rtx first = bb->head;
6825
6826 if (insn == first)
6827 return store_killed_in_insn (x, insn);
6828
6829 /* Check if the register operands of the store are OK in this block.
6830 Note that if registers are changed ANYWHERE in the block, we'll
6831 decide we can't move it, regardless of whether it changed above
6832 or below the store. This could be improved by checking the register
6833 operands while looking for aliasing in each insn. */
6834 if (!store_ops_ok (XEXP (x, 0), bb))
6835 return 1;
6836
6837 for ( ; insn && insn != PREV_INSN (first); insn = PREV_INSN (insn))
6838 if (store_killed_in_insn (x, insn))
6839 return 1;
6840
6841 return 0;
6842 }
6843
6844 #define ANTIC_STORE_LIST(x) ((x)->loads)
6845 #define AVAIL_STORE_LIST(x) ((x)->stores)
6846
6847 /* Given the table of available store insns at the end of blocks,
6848 determine which ones are not killed by aliasing, and generate
6849 the appropriate vectors for gen and killed. */
6850 static void
6851 build_store_vectors ()
6852 {
6853 basic_block bb, b;
6854 rtx insn, st;
6855 struct ls_expr * ptr;
6856
6857 /* Build the gen_vector. This is any store in the table which is not killed
6858 by aliasing later in its block. */
6859 ae_gen = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
6860 sbitmap_vector_zero (ae_gen, last_basic_block);
6861
6862 st_antloc = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
6863 sbitmap_vector_zero (st_antloc, last_basic_block);
6864
6865 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6866 {
6867 /* Put all the stores into either the antic list, or the avail list,
6868 or both. */
6869 rtx store_list = ptr->stores;
6870 ptr->stores = NULL_RTX;
6871
6872 for (st = store_list; st != NULL; st = XEXP (st, 1))
6873 {
6874 insn = XEXP (st, 0);
6875 bb = BLOCK_FOR_INSN (insn);
6876
6877 if (!store_killed_after (ptr->pattern, insn, bb))
6878 {
6879 /* If we've already seen an available expression in this block,
6880 we can delete the one we saw already (it occurs earlier in
6881 the block) and replace it with this one. We'll copy the
6882 old SRC expression to an unused register in case there
6883 are any side effects. */
6884 if (TEST_BIT (ae_gen[bb->index], ptr->index))
6885 {
6886 /* Find previous store. */
6887 rtx st;
6888 for (st = AVAIL_STORE_LIST (ptr); st ; st = XEXP (st, 1))
6889 if (BLOCK_FOR_INSN (XEXP (st, 0)) == bb)
6890 break;
6891 if (st)
6892 {
6893 rtx r = gen_reg_rtx (GET_MODE (ptr->pattern));
6894 if (gcse_file)
6895 fprintf (gcse_file, "Removing redundant store:\n");
6896 replace_store_insn (r, XEXP (st, 0), bb);
6897 XEXP (st, 0) = insn;
6898 continue;
6899 }
6900 }
6901 SET_BIT (ae_gen[bb->index], ptr->index);
6902 AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
6903 AVAIL_STORE_LIST (ptr));
6904 }
6905
6906 if (!store_killed_before (ptr->pattern, insn, bb))
6907 {
6908 SET_BIT (st_antloc[BLOCK_NUM (insn)], ptr->index);
6909 ANTIC_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
6910 ANTIC_STORE_LIST (ptr));
6911 }
6912 }
6913
6914 /* Free the original list of store insns. */
6915 free_INSN_LIST_list (&store_list);
6916 }
6917
6918 ae_kill = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
6919 sbitmap_vector_zero (ae_kill, last_basic_block);
6920
6921 transp = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
6922 sbitmap_vector_zero (transp, last_basic_block);
6923
6924 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6925 FOR_EACH_BB (b)
6926 {
6927 if (store_killed_after (ptr->pattern, b->head, b))
6928 {
6929 /* The anticipatable expression is not killed if it's gen'd. */
6930 /*
6931 We leave this check out for now. If we have a code sequence
6932 in a block which looks like:
6933 ST MEMa = x
6934 L y = MEMa
6935 ST MEMa = z
6936 We should flag this as having an ANTIC expression, NOT
6937 transparent, NOT killed, and AVAIL.
6938 Unfortunately, since we haven't re-written all loads to
6939 use the reaching reg, we'll end up doing an incorrect
6940 Load in the middle here if we push the store down. It happens in
6941 gcc.c-torture/execute/960311-1.c with -O3
6942 If we always kill it in this case, we'll sometimes do
6943 unnecessary work, but it shouldn't actually hurt anything.
6944 if (!TEST_BIT (ae_gen[b], ptr->index)). */
6945 SET_BIT (ae_kill[b->index], ptr->index);
6946 }
6947 else
6948 SET_BIT (transp[b->index], ptr->index);
6949 }
6950
6951 /* Any block with no exits calls some non-returning function, so
6952 we'd better mark the store killed here, or we might not store to
6953 it at all. If we knew it was abort, we wouldn't have to store,
6954 but we don't know that for sure. */
6955 if (gcse_file)
6956 {
6957 fprintf (gcse_file, "ST_avail and ST_antic (shown under loads..)\n");
6958 print_ldst_list (gcse_file);
6959 dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, last_basic_block);
6960 dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, last_basic_block);
6961 dump_sbitmap_vector (gcse_file, "Transpt", "", transp, last_basic_block);
6962 dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, last_basic_block);
6963 }
6964 }
6965
6966 /* Insert an instruction at the beginning of a basic block, and update
6967 the BLOCK_HEAD if needed. */
6968
6969 static void
6970 insert_insn_start_bb (insn, bb)
6971 rtx insn;
6972 basic_block bb;
6973 {
6974 /* Insert at start of successor block. */
6975 rtx prev = PREV_INSN (bb->head);
6976 rtx before = bb->head;
6977 while (before != 0)
6978 {
6979 if (GET_CODE (before) != CODE_LABEL
6980 && (GET_CODE (before) != NOTE
6981 || NOTE_LINE_NUMBER (before) != NOTE_INSN_BASIC_BLOCK))
6982 break;
6983 prev = before;
6984 if (prev == bb->end)
6985 break;
6986 before = NEXT_INSN (before);
6987 }
6988
6989 insn = emit_insn_after (insn, prev);
6990
6991 if (gcse_file)
6992 {
6993 fprintf (gcse_file, "STORE_MOTION insert store at start of BB %d:\n",
6994 bb->index);
6995 print_inline_rtx (gcse_file, insn, 6);
6996 fprintf (gcse_file, "\n");
6997 }
6998 }
6999
7000 /* This routine will insert a store on an edge. EXPR is the ldst entry for
7001 the memory reference, and E is the edge to insert it on. Returns non-zero
7002 if an edge insertion was performed. */
7003
7004 static int
7005 insert_store (expr, e)
7006 struct ls_expr * expr;
7007 edge e;
7008 {
7009 rtx reg, insn;
7010 basic_block bb;
7011 edge tmp;
7012
7013 /* We did all the deletes before this insert, so if we didn't delete a
7014 store, then we haven't set the reaching reg yet either. */
7015 if (expr->reaching_reg == NULL_RTX)
7016 return 0;
7017
7018 reg = expr->reaching_reg;
7019 insn = gen_move_insn (expr->pattern, reg);
7020
7021 /* If we are inserting this expression on ALL predecessor edges of a BB,
7022 insert it at the start of the BB, and reset the insert bits on the other
7023 edges so we don't try to insert it again there. */
7024 bb = e->dest;
7025 for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
7026 {
7027 int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
7028 if (index == EDGE_INDEX_NO_EDGE)
7029 abort ();
7030 if (! TEST_BIT (pre_insert_map[index], expr->index))
7031 break;
7032 }
7033
7034 /* If tmp is NULL, we found an insertion on every edge, blank the
7035 insertion vector for these edges, and insert at the start of the BB. */
7036 if (!tmp && bb != EXIT_BLOCK_PTR)
7037 {
7038 for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
7039 {
7040 int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
7041 RESET_BIT (pre_insert_map[index], expr->index);
7042 }
7043 insert_insn_start_bb (insn, bb);
7044 return 0;
7045 }
7046
7047 /* We can't insert on this edge, so we'll insert at the head of the
7048 successor block. See Morgan, sec 10.5. */
7049 if ((e->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
7050 {
7051 insert_insn_start_bb (insn, bb);
7052 return 0;
7053 }
7054
7055 insert_insn_on_edge (insn, e);
7056
7057 if (gcse_file)
7058 {
7059 fprintf (gcse_file, "STORE_MOTION insert insn on edge (%d, %d):\n",
7060 e->src->index, e->dest->index);
7061 print_inline_rtx (gcse_file, insn, 6);
7062 fprintf (gcse_file, "\n");
7063 }
7064
7065 return 1;
7066 }
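
/* A worked illustration of the all-edges shortcut above (hypothetical
   CFG): if PRE_INSERT_MAP requests the same store on every incoming
   edge of a join block J,

        B1 --e1--> J <--e2-- B2

   then instead of committing a copy of the store on both E1 and E2 we
   clear both insertion bits and emit a single copy at the start of J;
   this is equivalent because every path into J receives the store,
   and it avoids splitting the edges.  */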
7067
7068 /* This routine will replace a store with a SET to a specified register. */
7069
7070 static void
7071 replace_store_insn (reg, del, bb)
7072 rtx reg, del;
7073 basic_block bb;
7074 {
7075 rtx insn;
7076
7077 insn = gen_move_insn (reg, SET_SRC (PATTERN (del)));
7078 insn = emit_insn_after (insn, del);
7079
7080 if (gcse_file)
7081 {
7082 fprintf (gcse_file,
7083 "STORE_MOTION delete insn in BB %d:\n ", bb->index);
7084 print_inline_rtx (gcse_file, del, 6);
7085 fprintf (gcse_file, "\nSTORE MOTION replaced with insn:\n ");
7086 print_inline_rtx (gcse_file, insn, 6);
7087 fprintf (gcse_file, "\n");
7088 }
7089
7090 delete_insn (del);
7091 }
7092
7093
7094 /* Delete a store, but copy the value that would have been stored into
7095 the reaching_reg for later storing. */
7096
7097 static void
7098 delete_store (expr, bb)
7099 struct ls_expr * expr;
7100 basic_block bb;
7101 {
7102 rtx reg, i, del;
7103
7104 if (expr->reaching_reg == NULL_RTX)
7105 expr->reaching_reg = gen_reg_rtx (GET_MODE (expr->pattern));
7106
7107
7108 /* If there is more than 1 store, the earlier ones will be dead,
7109 but it doesn't hurt to replace them here. */
7110 reg = expr->reaching_reg;
7111
7112 for (i = AVAIL_STORE_LIST (expr); i; i = XEXP (i, 1))
7113 {
7114 del = XEXP (i, 0);
7115 if (BLOCK_FOR_INSN (del) == bb)
7116 {
7117 /* We know there is only one since we deleted redundant
7118 ones during the available computation. */
7119 replace_store_insn (reg, del, bb);
7120 break;
7121 }
7122 }
7123 }
7124
7125 /* Free memory used by store motion. */
7126
7127 static void
7128 free_store_memory ()
7129 {
7130 free_ldst_mems ();
7131
7132 if (ae_gen)
7133 sbitmap_vector_free (ae_gen);
7134 if (ae_kill)
7135 sbitmap_vector_free (ae_kill);
7136 if (transp)
7137 sbitmap_vector_free (transp);
7138 if (st_antloc)
7139 sbitmap_vector_free (st_antloc);
7140 if (pre_insert_map)
7141 sbitmap_vector_free (pre_insert_map);
7142 if (pre_delete_map)
7143 sbitmap_vector_free (pre_delete_map);
7144 if (reg_set_in_block)
7145 sbitmap_vector_free (reg_set_in_block);
7146
7147 ae_gen = ae_kill = transp = st_antloc = NULL;
7148 pre_insert_map = pre_delete_map = reg_set_in_block = NULL;
7149 }
7150
7151 /* Perform store motion. Much like gcse, except we move expressions the
7152 other way by looking at the flowgraph in reverse. */
7153
7154 static void
7155 store_motion ()
7156 {
7157 basic_block bb;
7158 int x;
7159 struct ls_expr * ptr;
7160 int update_flow = 0;
7161
7162 if (gcse_file)
7163 {
7164 fprintf (gcse_file, "before store motion\n");
7165 print_rtl (gcse_file, get_insns ());
7166 }
7167
7168
7169 init_alias_analysis ();
7170
7171 /* Find all the stores that are live to the end of their block. */
7172 num_stores = compute_store_table ();
7173 if (num_stores == 0)
7174 {
7175 sbitmap_vector_free (reg_set_in_block);
7176 end_alias_analysis ();
7177 return;
7178 }
7179
7180 /* Now compute what's actually available to move. */
7181 add_noreturn_fake_exit_edges ();
7182 build_store_vectors ();
7183
7184 edge_list = pre_edge_rev_lcm (gcse_file, num_stores, transp, ae_gen,
7185 st_antloc, ae_kill, &pre_insert_map,
7186 &pre_delete_map);
7187
7188 /* Now we want to insert the new stores which are going to be needed. */
7189 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
7190 {
7191 FOR_EACH_BB (bb)
7192 if (TEST_BIT (pre_delete_map[bb->index], ptr->index))
7193 delete_store (ptr, bb);
7194
7195 for (x = 0; x < NUM_EDGES (edge_list); x++)
7196 if (TEST_BIT (pre_insert_map[x], ptr->index))
7197 update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x));
7198 }
7199
7200 if (update_flow)
7201 commit_edge_insertions ();
7202
7203 free_store_memory ();
7204 free_edge_list (edge_list);
7205 remove_fake_edges ();
7206 end_alias_analysis ();
7207 }
7208
7209 #include "gt-gcse.h"