1 /* Partial redundancy elimination / Hoisting for RTL.
2 Copyright (C) 1997-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* TODO
21 - reordering of memory allocation and freeing to be more space efficient
22 - calc rough register pressure information and use the info to drive all
23 kinds of code motion (including code hoisting) in a unified way.
24 */
25
26 /* References searched while implementing this.
27
28 Compilers Principles, Techniques and Tools
29 Aho, Sethi, Ullman
30 Addison-Wesley, 1988
31
32 Global Optimization by Suppression of Partial Redundancies
33 E. Morel, C. Renvoise
34 Communications of the ACM, Vol. 22, Num. 2, Feb. 1979
35
36 A Portable Machine-Independent Global Optimizer - Design and Measurements
37 Frederick Chow
38 Stanford Ph.D. thesis, Dec. 1983
39
40 A Fast Algorithm for Code Movement Optimization
41 D.M. Dhamdhere
42 SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
43
44 A Solution to a Problem with Morel and Renvoise's
45 Global Optimization by Suppression of Partial Redundancies
46 K-H Drechsler, M.P. Stadel
47 ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
48
49 Practical Adaptation of the Global Optimization
50 Algorithm of Morel and Renvoise
51 D.M. Dhamdhere
52 ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991
53
54 Efficiently Computing Static Single Assignment Form and the Control
55 Dependence Graph
56 R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
57 ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
58
59 Lazy Code Motion
60 J. Knoop, O. Ruthing, B. Steffen
61 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
62
63 What's In a Region? Or Computing Control Dependence Regions in Near-Linear
64 Time for Reducible Control Flow
65 Thomas Ball
66 ACM Letters on Programming Languages and Systems,
67 Vol. 2, Num. 1-4, Mar-Dec 1993
68
69 An Efficient Representation for Sparse Sets
70 Preston Briggs, Linda Torczon
71 ACM Letters on Programming Languages and Systems,
72 Vol. 2, Num. 1-4, Mar-Dec 1993
73
74 A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
75 K-H Drechsler, M.P. Stadel
76 ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
77
78 Partial Dead Code Elimination
79 J. Knoop, O. Ruthing, B. Steffen
80 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
81
82 Effective Partial Redundancy Elimination
83 P. Briggs, K.D. Cooper
84 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
85
86 The Program Structure Tree: Computing Control Regions in Linear Time
87 R. Johnson, D. Pearson, K. Pingali
88 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
89
90 Optimal Code Motion: Theory and Practice
91 J. Knoop, O. Ruthing, B. Steffen
92 ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
93
94 The power of assignment motion
95 J. Knoop, O. Ruthing, B. Steffen
96 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
97
98 Global code motion / global value numbering
99 C. Click
100 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
101
102 Value Driven Redundancy Elimination
103 L.T. Simpson
104 Rice University Ph.D. thesis, Apr. 1996
105
106 Value Numbering
107 L.T. Simpson
108 Massively Scalar Compiler Project, Rice University, Sep. 1996
109
110 High Performance Compilers for Parallel Computing
111 Michael Wolfe
112 Addison-Wesley, 1996
113
114 Advanced Compiler Design and Implementation
115 Steven Muchnick
116 Morgan Kaufmann, 1997
117
118 Building an Optimizing Compiler
119 Robert Morgan
120 Digital Press, 1998
121
122 People wishing to speed up the code here should read:
123 Elimination Algorithms for Data Flow Analysis
124 B.G. Ryder, M.C. Paull
125 ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
126
127 How to Analyze Large Programs Efficiently and Informatively
128 D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
129 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
130
131 People wishing to do something different can find various possibilities
132 in the above papers and elsewhere.
133 */
134
135 #include "config.h"
136 #include "system.h"
137 #include "coretypes.h"
138 #include "tm.h"
139 #include "diagnostic-core.h"
140 #include "toplev.h"
141
142 #include "hard-reg-set.h"
143 #include "rtl.h"
144 #include "tree.h"
145 #include "tm_p.h"
146 #include "regs.h"
147 #include "ira.h"
148 #include "flags.h"
149 #include "insn-config.h"
150 #include "recog.h"
151 #include "basic-block.h"
152 #include "function.h"
153 #include "expr.h"
154 #include "except.h"
155 #include "ggc.h"
156 #include "params.h"
157 #include "cselib.h"
158 #include "intl.h"
159 #include "obstack.h"
160 #include "tree-pass.h"
161 #include "hashtab.h"
162 #include "df.h"
163 #include "dbgcnt.h"
164 #include "target.h"
165 #include "gcse.h"
166
167 /* We support GCSE via Partial Redundancy Elimination. PRE optimizations
168 are a superset of those done by classic GCSE.
169
170 Two passes of copy/constant propagation are done around PRE or hoisting
171 because the first one enables more GCSE and the second one helps to clean
172 up the copies that PRE and HOIST create. This is needed more for PRE than
173 for HOIST because code hoisting will try to use an existing register
174 containing the common subexpression rather than create a new one. This is
175 harder to do for PRE because of the code motion (which HOIST doesn't do).
176
177 Expressions we are interested in GCSE-ing are of the form
178 (set (pseudo-reg) (expression)).
179 Function want_to_gcse_p says what these are.
180
181 In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
182 This allows PRE to hoist expressions that are expressed in multiple insns,
183 such as complex address calculations (e.g. for PIC code, or loads with a
184 high part and a low part).
185
186 PRE handles moving invariant expressions out of loops (by treating them as
187 partially redundant).
188
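   For illustration, a source-level sketch of a partial redundancy and
   how PRE resolves it (variable names are hypothetical):

	if (cond)                  if (cond)
	  x = a + b;       ==>       x = a + b, t = x;
	else                       else
	  other ();                  other (), t = a + b;
	y = a + b;                 y = t;

   The computation of a + b is inserted on the path where it was missing,
   which makes the final occurrence fully redundant; it is then replaced
   by a copy from t.  A loop-invariant expression is partially redundant
   along the loop back edge, so the same mechanism moves it before the
   loop.
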
189 **********************
190
191 We used to support multiple passes but there are diminishing returns in
192 doing so. The first pass usually makes 90% of the changes that are doable.
193 A second pass can make a few more changes made possible by the first pass.
194 Experiments show any further passes don't make enough changes to justify
195 the expense.
196
197 A study of spec92 using an unlimited number of passes:
198 [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
199 [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
200 [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1
201
202 It was found that doing copy propagation between passes enables further
203 substitutions.
204
205 This study was done before expressions in REG_EQUAL notes were added as
206 candidate expressions for optimization, and before the GIMPLE optimizers
207 were added. Multiple passes are probably even less efficient now than
208 at the time when the study was conducted.
209
210 PRE is quite expensive in complicated functions because the DFA can take
211 a while to converge. Hence we only perform one pass.
212
213 **********************
214
215 The steps for PRE are:
216
217 1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
218
219 2) Perform the data flow analysis for PRE.
220
221 3) Delete the redundant instructions
222
223 4) Insert the required copies [if any] that make the partially
224 redundant instructions fully redundant.
225
226 5) For other reaching expressions, insert an instruction to copy the value
227 to a newly created pseudo that will reach the redundant instruction.
228
229 The deletion is done first so that when we do insertions we
230 know which pseudo reg to use.
231
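   A minimal RTL-level sketch of steps 3-5 (register numbers are
   hypothetical): a redundant occurrence

	(set (reg 101) (plus (reg 50) (reg 51)))

   is deleted by rewriting it as a copy from the expression's
   reaching_reg, say (reg 200):

	(set (reg 101) (reg 200))

   and on each path that must still supply the value we insert

	(set (reg 200) (plus (reg 50) (reg 51)))

   Doing the deletions first is what fixes (reg 200) as the pseudo the
   insertions must target.
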
232 Various papers have argued that PRE DFA is expensive (O(n^2)) and others
233 argue it is not. The number of iterations for the algorithm to converge
234 is typically 2-4 so I don't view it as that expensive (relatively speaking).
235
236 PRE GCSE depends heavily on the second CPROP pass to clean up the copies
237 we create. To make an expression reach the place where it's redundant,
238 the result of the expression is copied to a new register, and the redundant
239 expression is deleted by replacing it with this new register. Classic GCSE
240 doesn't have this problem as much as it computes the reaching defs of
241 each register in each block and thus can try to use an existing
242 register. */
243 \f
244 /* GCSE global vars. */
245
246 struct target_gcse default_target_gcse;
247 #if SWITCHABLE_TARGET
248 struct target_gcse *this_target_gcse = &default_target_gcse;
249 #endif
250
251 /* Set to non-zero if CSE should run after all GCSE optimizations are done. */
252 int flag_rerun_cse_after_global_opts;
253
254 /* An obstack for our working variables. */
255 static struct obstack gcse_obstack;
256
257 /* Hash table of expressions. */
258
259 struct expr
260 {
261 /* The expression. */
262 rtx expr;
263 /* Index in the available expression bitmaps. */
264 int bitmap_index;
265 /* Next entry with the same hash. */
266 struct expr *next_same_hash;
267 /* List of anticipatable occurrences in basic blocks in the function.
268 An "anticipatable occurrence" is one that is the first occurrence in the
269 basic block, the operands are not modified in the basic block prior
270 to the occurrence and the output is not used between the start of
271 the block and the occurrence. */
272 struct occr *antic_occr;
273 /* List of available occurrences in basic blocks in the function.
274 An "available occurrence" is one that is the last occurrence in the
275 basic block and the operands are not modified by following statements in
276 the basic block [including this insn]. */
277 struct occr *avail_occr;
278 /* Non-null if the computation is PRE redundant.
279 The value is the newly created pseudo-reg to record a copy of the
280 expression in all the places that reach the redundant copy. */
281 rtx reaching_reg;
282 /* Maximum distance in instructions this expression can travel.
283 We avoid moving simple expressions for more than a few instructions
284 to keep register pressure under control.
285 A value of "0" removes restrictions on how far the expression can
286 travel. */
287 int max_distance;
288 };
289
290 /* Occurrence of an expression.
291 There is one per basic block. If a pattern appears more than once the
292 last appearance is used [or first for anticipatable expressions]. */
293
294 struct occr
295 {
296 /* Next occurrence of this expression. */
297 struct occr *next;
298 /* The insn that computes the expression. */
299 rtx insn;
300 /* Nonzero if this [anticipatable] occurrence has been deleted. */
301 char deleted_p;
302 /* Nonzero if this [available] occurrence has been copied to
303 reaching_reg. */
304 /* ??? This is mutually exclusive with deleted_p, so they could share
305 the same byte. */
306 char copied_p;
307 };
308
309 typedef struct occr *occr_t;
310
311 /* Expression hash tables.
312 Each hash table is an array of buckets.
313 ??? It is known that if it were an array of entries, structure elements
314 `next_same_hash' and `bitmap_index' wouldn't be necessary. However, it is
315 not clear whether in the final analysis a sufficient amount of memory would
316 be saved as the size of the available expression bitmaps would be larger
317 [one could build a mapping table without holes afterwards though].
318 Someday I'll perform the computation and figure it out. */
319
320 struct hash_table_d
321 {
322 /* The table itself.
323 This is an array of `expr_hash_table_size' elements. */
324 struct expr **table;
325
326 /* Size of the hash table, in elements. */
327 unsigned int size;
328
329 /* Number of hash table elements. */
330 unsigned int n_elems;
331 };
332
333 /* Expression hash table. */
334 static struct hash_table_d expr_hash_table;
335
336 /* This is a list of expressions which are MEMs and will be used by load
337 or store motion.
338 Load motion tracks MEMs which aren't killed by anything except itself,
339 i.e. loads and stores to a single location.
340 We can then allow movement of these MEM refs with a little special
341 allowance. (All stores copy the same value to the reaching reg used
342 for the loads.) This means all values used to store into memory must have
343 no side effects so we can re-issue the setter value. */
344
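/* For illustration, the effect of load motion on a simple sequence
   (a sketch; registers are hypothetical):

	*p = r1;                *p = r1;  t = r1;
	...              ==>    ...
	r2 = *p;                r2 = t;

   The store also copies its value into the reaching register t, so the
   later load from *p becomes redundant.  This is only valid when
   nothing between the two references can modify *p.  */
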
345 struct ls_expr
346 {
347 struct expr * expr; /* Gcse expression reference for LM. */
348 rtx pattern; /* Pattern of this mem. */
349 rtx pattern_regs; /* List of registers mentioned by the mem. */
350 rtx loads; /* INSN list of loads seen. */
351 rtx stores; /* INSN list of stores seen. */
352 struct ls_expr * next; /* Next in the list. */
353 int invalid; /* Invalid for some reason. */
354 int index; /* If it maps to a bitmap index. */
355 unsigned int hash_index; /* Index when in a hash table. */
356 rtx reaching_reg; /* Register to use when re-writing. */
357 };
358
359 /* Head of the list of load/store memory refs. */
360 static struct ls_expr * pre_ldst_mems = NULL;
361
362 /* Hashtable for the load/store memory refs. */
363 static htab_t pre_ldst_table = NULL;
364
365 /* Bitmap containing one bit for each register in the program.
366 Used when performing GCSE to track which registers have been set since
367 the start of the basic block. */
368 static regset reg_set_bitmap;
369
370 /* Array, indexed by basic block number for a list of insns which modify
371 memory within that block. */
372 static vec<rtx> *modify_mem_list;
373 static bitmap modify_mem_list_set;
374
375 typedef struct modify_pair_s
376 {
377 rtx dest; /* A MEM. */
378 rtx dest_addr; /* The canonical address of `dest'. */
379 } modify_pair;
380
381
382 /* This array parallels modify_mem_list, except that it stores MEMs
383 being set and their canonicalized memory addresses. */
384 static vec<modify_pair> *canon_modify_mem_list;
385
386 /* Bitmap indexed by block numbers to record which blocks contain
387 function calls. */
388 static bitmap blocks_with_calls;
389
390 /* Various variables for statistics gathering. */
391
392 /* Memory used in a pass.
393 This isn't intended to be absolutely precise. Its intent is only
394 to keep an eye on memory usage. */
395 static int bytes_used;
396
397 /* GCSE substitutions made. */
398 static int gcse_subst_count;
399 /* Number of copy instructions created. */
400 static int gcse_create_count;
401 \f
402 /* Doing code hoisting. */
403 static bool doing_code_hoisting_p = false;
404 \f
405 /* For available exprs */
406 static sbitmap *ae_kill;
407 \f
408 /* Data stored for each basic block. */
409 struct bb_data
410 {
411 /* Maximal register pressure inside basic block for given register class
412 (defined only for the pressure classes). */
413 int max_reg_pressure[N_REG_CLASSES];
414 /* Recorded register pressure of basic block before trying to hoist
415 an expression. Will be used to restore the register pressure
416 if the expression should not be hoisted. */
417 int old_pressure;
418 /* Recorded register live_in info of basic block during code hoisting
419 process. BACKUP is used to record live_in info before trying to
420 hoist an expression, and will be used to restore LIVE_IN if the
421 expression should not be hoisted. */
422 bitmap live_in, backup;
423 };
424
425 #define BB_DATA(bb) ((struct bb_data *) (bb)->aux)
426
427 static basic_block curr_bb;
428
429 /* Current register pressure for each pressure class. */
430 static int curr_reg_pressure[N_REG_CLASSES];
431 \f
432
433 static void compute_can_copy (void);
434 static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
435 static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
436 static void *gcse_alloc (unsigned long);
437 static void alloc_gcse_mem (void);
438 static void free_gcse_mem (void);
439 static void hash_scan_insn (rtx, struct hash_table_d *);
440 static void hash_scan_set (rtx, rtx, struct hash_table_d *);
441 static void hash_scan_clobber (rtx, rtx, struct hash_table_d *);
442 static void hash_scan_call (rtx, rtx, struct hash_table_d *);
443 static int want_to_gcse_p (rtx, int *);
444 static int oprs_unchanged_p (const_rtx, const_rtx, int);
445 static int oprs_anticipatable_p (const_rtx, const_rtx);
446 static int oprs_available_p (const_rtx, const_rtx);
447 static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int, int,
448 struct hash_table_d *);
449 static unsigned int hash_expr (const_rtx, enum machine_mode, int *, int);
450 static int expr_equiv_p (const_rtx, const_rtx);
451 static void record_last_reg_set_info (rtx, int);
452 static void record_last_mem_set_info (rtx);
453 static void record_last_set_info (rtx, const_rtx, void *);
454 static void compute_hash_table (struct hash_table_d *);
455 static void alloc_hash_table (struct hash_table_d *);
456 static void free_hash_table (struct hash_table_d *);
457 static void compute_hash_table_work (struct hash_table_d *);
458 static void dump_hash_table (FILE *, const char *, struct hash_table_d *);
459 static void compute_transp (const_rtx, int, sbitmap *);
460 static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
461 struct hash_table_d *);
462 static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
463 static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
464 static void canon_list_insert (rtx, const_rtx, void *);
465 static void alloc_pre_mem (int, int);
466 static void free_pre_mem (void);
467 static struct edge_list *compute_pre_data (void);
468 static int pre_expr_reaches_here_p (basic_block, struct expr *,
469 basic_block);
470 static void insert_insn_end_basic_block (struct expr *, basic_block);
471 static void pre_insert_copy_insn (struct expr *, rtx);
472 static void pre_insert_copies (void);
473 static int pre_delete (void);
474 static int pre_gcse (struct edge_list *);
475 static int one_pre_gcse_pass (void);
476 static void add_label_notes (rtx, rtx);
477 static void alloc_code_hoist_mem (int, int);
478 static void free_code_hoist_mem (void);
479 static void compute_code_hoist_vbeinout (void);
480 static void compute_code_hoist_data (void);
481 static int should_hoist_expr_to_dom (basic_block, struct expr *, basic_block,
482 sbitmap, int, int *, enum reg_class,
483 int *, bitmap, rtx);
484 static int hoist_code (void);
485 static enum reg_class get_regno_pressure_class (int regno, int *nregs);
486 static enum reg_class get_pressure_class_and_nregs (rtx insn, int *nregs);
487 static int one_code_hoisting_pass (void);
488 static rtx process_insert_insn (struct expr *);
489 static int pre_edge_insert (struct edge_list *, struct expr **);
490 static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
491 basic_block, char *);
492 static struct ls_expr * ldst_entry (rtx);
493 static void free_ldst_entry (struct ls_expr *);
494 static void free_ld_motion_mems (void);
495 static void print_ldst_list (FILE *);
496 static struct ls_expr * find_rtx_in_ldst (rtx);
497 static int simple_mem (const_rtx);
498 static void invalidate_any_buried_refs (rtx);
499 static void compute_ld_motion_mems (void);
500 static void trim_ld_motion_mems (void);
501 static void update_ld_motion_stores (struct expr *);
502 static void clear_modify_mem_tables (void);
503 static void free_modify_mem_tables (void);
504 static rtx gcse_emit_move_after (rtx, rtx, rtx);
505 static bool is_too_expensive (const char *);
506
507 #define GNEW(T) ((T *) gmalloc (sizeof (T)))
508 #define GCNEW(T) ((T *) gcalloc (1, sizeof (T)))
509
510 #define GNEWVEC(T, N) ((T *) gmalloc (sizeof (T) * (N)))
511 #define GCNEWVEC(T, N) ((T *) gcalloc ((N), sizeof (T)))
512
513 #define GNEWVAR(T, S) ((T *) gmalloc ((S)))
514 #define GCNEWVAR(T, S) ((T *) gcalloc (1, (S)))
515
516 #define GOBNEW(T) ((T *) gcse_alloc (sizeof (T)))
517 #define GOBNEWVAR(T, S) ((T *) gcse_alloc ((S)))
518 \f
519 /* Misc. utilities. */
520
521 #define can_copy \
522 (this_target_gcse->x_can_copy)
523 #define can_copy_init_p \
524 (this_target_gcse->x_can_copy_init_p)
525
526 /* Compute which modes support reg/reg copy operations. */
527
528 static void
529 compute_can_copy (void)
530 {
531 int i;
532 #ifndef AVOID_CCMODE_COPIES
533 rtx reg, insn;
534 #endif
535 memset (can_copy, 0, NUM_MACHINE_MODES);
536
537 start_sequence ();
538 for (i = 0; i < NUM_MACHINE_MODES; i++)
539 if (GET_MODE_CLASS (i) == MODE_CC)
540 {
541 #ifdef AVOID_CCMODE_COPIES
542 can_copy[i] = 0;
543 #else
544 reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
545 insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
546 if (recog (PATTERN (insn), insn, NULL) >= 0)
547 can_copy[i] = 1;
548 #endif
549 }
550 else
551 can_copy[i] = 1;
552
553 end_sequence ();
554 }
555
556 /* Returns whether the mode supports reg/reg copy operations. */
557
558 bool
559 can_copy_p (enum machine_mode mode)
560 {
561 if (! can_copy_init_p)
562 {
563 compute_can_copy ();
564 can_copy_init_p = true;
565 }
566
567 return can_copy[mode] != 0;
568 }
569 \f
570 /* Cover function to xmalloc to record bytes allocated. */
571
572 static void *
573 gmalloc (size_t size)
574 {
575 bytes_used += size;
576 return xmalloc (size);
577 }
578
579 /* Cover function to xcalloc to record bytes allocated. */
580
581 static void *
582 gcalloc (size_t nelem, size_t elsize)
583 {
584 bytes_used += nelem * elsize;
585 return xcalloc (nelem, elsize);
586 }
587
588 /* Cover function to obstack_alloc. */
589
590 static void *
591 gcse_alloc (unsigned long size)
592 {
593 bytes_used += size;
594 return obstack_alloc (&gcse_obstack, size);
595 }
596
597 /* Allocate memory for the reg/memory set tracking tables.
598 This is called at the start of each pass. */
599
600 static void
601 alloc_gcse_mem (void)
602 {
603 /* Allocate vars to track sets of regs. */
604 reg_set_bitmap = ALLOC_REG_SET (NULL);
605
606 /* Allocate array to keep a list of insns which modify memory in each
607 basic block. The two typedefs are needed to work around a
608 pre-processor limitation with template types in macro arguments. */
609 typedef vec<rtx> vec_rtx_heap;
610 typedef vec<modify_pair> vec_modify_pair_heap;
611 modify_mem_list = GCNEWVEC (vec_rtx_heap, last_basic_block);
612 canon_modify_mem_list = GCNEWVEC (vec_modify_pair_heap, last_basic_block);
613 modify_mem_list_set = BITMAP_ALLOC (NULL);
614 blocks_with_calls = BITMAP_ALLOC (NULL);
615 }
616
617 /* Free memory allocated by alloc_gcse_mem. */
618
619 static void
620 free_gcse_mem (void)
621 {
622 FREE_REG_SET (reg_set_bitmap);
623
624 free_modify_mem_tables ();
625 BITMAP_FREE (modify_mem_list_set);
626 BITMAP_FREE (blocks_with_calls);
627 }
628 \f
629 /* Compute the local properties of each recorded expression.
630
631 Local properties are those that are defined by the block, irrespective of
632 other blocks.
633
634 An expression is transparent in a block if its operands are not modified
635 in the block.
636
637 An expression is computed (locally available) in a block if it is computed
638 at least once and the expression would contain the same value if the
639 computation was moved to the end of the block.
640
641 An expression is locally anticipatable in a block if it is computed at
642 least once and the expression would contain the same value if the computation
643 was moved to the beginning of the block.
644
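   For example, in a block containing (register numbers hypothetical)

	(set (reg 101) (plus (reg 50) (reg 51)))
	(set (reg 50) (const_int 0))

   the expression (plus (reg 50) (reg 51)) is locally anticipatable (its
   operands are unchanged before the occurrence) but not computed in the
   sense above (reg 50 changes afterwards), and the block is not
   transparent for it.
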
645 We call this routine for PRE and code hoisting. Both compute
646 basically the same information and thus can easily share this code.
647
648 TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
649 properties. If NULL, then it is not necessary to compute or record that
650 particular property.
651
652 TABLE controls which hash table to look at. */
653
654 static void
655 compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
656 struct hash_table_d *table)
657 {
658 unsigned int i;
659
660 /* Initialize any bitmaps that were passed in. */
661 if (transp)
662 {
663 bitmap_vector_ones (transp, last_basic_block);
664 }
665
666 if (comp)
667 bitmap_vector_clear (comp, last_basic_block);
668 if (antloc)
669 bitmap_vector_clear (antloc, last_basic_block);
670
671 for (i = 0; i < table->size; i++)
672 {
673 struct expr *expr;
674
675 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
676 {
677 int indx = expr->bitmap_index;
678 struct occr *occr;
679
680 /* The expression is transparent in this block if it is not killed.
681 We start by assuming all are transparent [none are killed], and
682 then reset the bits for those that are. */
683 if (transp)
684 compute_transp (expr->expr, indx, transp);
685
686 /* The occurrences recorded in antic_occr are exactly those that
687 we want to set to nonzero in ANTLOC. */
688 if (antloc)
689 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
690 {
691 bitmap_set_bit (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);
692
693 /* While we're scanning the table, this is a good place to
694 initialize this. */
695 occr->deleted_p = 0;
696 }
697
698 /* The occurrences recorded in avail_occr are exactly those that
699 we want to set to nonzero in COMP. */
700 if (comp)
701 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
702 {
703 bitmap_set_bit (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);
704
705 /* While we're scanning the table, this is a good place to
706 initialize this. */
707 occr->copied_p = 0;
708 }
709
710 /* While we're scanning the table, this is a good place to
711 initialize this. */
712 expr->reaching_reg = 0;
713 }
714 }
715 }
716 \f
717 /* Hash table support. */
718
719 struct reg_avail_info
720 {
721 basic_block last_bb;
722 int first_set;
723 int last_set;
724 };
725
726 static struct reg_avail_info *reg_avail_info;
727 static basic_block current_bb;
728
729 /* See whether X, the source of a set, is something we want to consider for
730 GCSE. */
731
732 static int
733 want_to_gcse_p (rtx x, int *max_distance_ptr)
734 {
735 #ifdef STACK_REGS
736 /* On register stack architectures, don't GCSE constants from the
737 constant pool, as the benefits are often swamped by the overhead
738 of shuffling the register stack between basic blocks. */
739 if (IS_STACK_MODE (GET_MODE (x)))
740 x = avoid_constant_pool_reference (x);
741 #endif
742
743 /* GCSE'ing constants:
744
745 We do not specifically distinguish between constant and non-constant
746 expressions in PRE and Hoist. We use set_src_cost below to limit
747 the maximum distance simple expressions can travel.
748
749 Nevertheless, constants are much easier to GCSE, and, hence,
750 it is easy to overdo the optimizations. Usually, excessive PRE and
751 Hoisting of constants leads to increased register pressure.
752
753 The RA can deal with this by rematerializing some of the constants.
754 Therefore, it is important that the back-end generates sets of constants
755 in a way that allows reload to rematerialize them under high register
756 pressure, i.e., a pseudo register with REG_EQUAL to constant
757 is set only once. Failing to do so will result in IRA/reload
758 spilling such constants under high register pressure instead of
759 rematerializing them. */
760
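  /* As a concrete example of the distance computation below (assuming
     the default --param values gcse-cost-distance-ratio=10 and
     gcse-unrestricted-cost=3): an expression whose set_src_cost is
     COSTS_N_INSNS (1) gets max_distance = 10 * COSTS_N_INSNS (1) / 10,
     so it may only travel a few instructions, while anything costing
     COSTS_N_INSNS (3) or more gets max_distance = 0, i.e. unrestricted
     motion.  */
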
761 switch (GET_CODE (x))
762 {
763 case REG:
764 case SUBREG:
765 case CALL:
766 return 0;
767
768 CASE_CONST_ANY:
769 if (!doing_code_hoisting_p)
770 /* Do not PRE constants. */
771 return 0;
772
773 /* FALLTHRU */
774
775 default:
776 if (doing_code_hoisting_p)
777 /* PRE doesn't implement max_distance restriction. */
778 {
779 int cost;
780 int max_distance;
781
782 gcc_assert (!optimize_function_for_speed_p (cfun)
783 && optimize_function_for_size_p (cfun));
784 cost = set_src_cost (x, 0);
785
786 if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
787 {
788 max_distance = (GCSE_COST_DISTANCE_RATIO * cost) / 10;
789 if (max_distance == 0)
790 return 0;
791
792 gcc_assert (max_distance > 0);
793 }
794 else
795 max_distance = 0;
796
797 if (max_distance_ptr)
798 *max_distance_ptr = max_distance;
799 }
800
801 return can_assign_to_reg_without_clobbers_p (x);
802 }
803 }
804
805 /* Used internally by can_assign_to_reg_without_clobbers_p. */
806
807 static GTY(()) rtx test_insn;
808
809 /* Return true if we can assign X to a pseudo register such that the
810 resulting insn does not result in clobbering a hard register as a
811 side-effect.
812
813 Additionally, if the target requires it, check that the resulting insn
814 can be copied. If it cannot, this means that X is special and probably
815 has hidden side-effects we don't want to mess with.
816
817 This function is typically used by code motion passes, to verify
818 that it is safe to insert an insn without worrying about clobbering
819 maybe live hard regs. */
820
821 bool
822 can_assign_to_reg_without_clobbers_p (rtx x)
823 {
824 int num_clobbers = 0;
825 int icode;
826
827 /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */
828 if (general_operand (x, GET_MODE (x)))
829 return 1;
830 else if (GET_MODE (x) == VOIDmode)
831 return 0;
832
833 /* Otherwise, check if we can make a valid insn from it. First initialize
834 our test insn if we haven't already. */
835 if (test_insn == 0)
836 {
837 test_insn
838 = make_insn_raw (gen_rtx_SET (VOIDmode,
839 gen_rtx_REG (word_mode,
840 FIRST_PSEUDO_REGISTER * 2),
841 const0_rtx));
842 NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
843 }
844
845 /* Now make an insn like the one we would make when GCSE'ing and see if
846 valid. */
847 PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
848 SET_SRC (PATTERN (test_insn)) = x;
849
850 icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);
851 if (icode < 0)
852 return false;
853
854 if (num_clobbers > 0 && added_clobbers_hard_reg_p (icode))
855 return false;
856
857 if (targetm.cannot_copy_insn_p && targetm.cannot_copy_insn_p (test_insn))
858 return false;
859
860 return true;
861 }
862
863 /* Return nonzero if the operands of expression X are unchanged from the
864 start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
865 or from INSN to the end of INSN's basic block (if AVAIL_P != 0). */
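
/* For example, given a block (LUIDs in parentheses, register numbers
   hypothetical)

	(1)  (set (reg 101) (plus (reg 50) (reg 51)))
	(2)  (set (reg 50) (const_int 0))

   the operands of (plus (reg 50) (reg 51)) at insn 1 are unchanged with
   AVAIL_P == 0, since reg 50 is first set at LUID 2, after the insn;
   but with AVAIL_P != 0 they are changed, since reg 50's last set
   follows LUID 1.  */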
866
867 static int
868 oprs_unchanged_p (const_rtx x, const_rtx insn, int avail_p)
869 {
870 int i, j;
871 enum rtx_code code;
872 const char *fmt;
873
874 if (x == 0)
875 return 1;
876
877 code = GET_CODE (x);
878 switch (code)
879 {
880 case REG:
881 {
882 struct reg_avail_info *info = &reg_avail_info[REGNO (x)];
883
884 if (info->last_bb != current_bb)
885 return 1;
886 if (avail_p)
887 return info->last_set < DF_INSN_LUID (insn);
888 else
889 return info->first_set >= DF_INSN_LUID (insn);
890 }
891
892 case MEM:
893 if (! flag_gcse_lm
894 || load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
895 x, avail_p))
896 return 0;
897 else
898 return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);
899
900 case PRE_DEC:
901 case PRE_INC:
902 case POST_DEC:
903 case POST_INC:
904 case PRE_MODIFY:
905 case POST_MODIFY:
906 return 0;
907
908 case PC:
909 case CC0: /*FIXME*/
910 case CONST:
911 CASE_CONST_ANY:
912 case SYMBOL_REF:
913 case LABEL_REF:
914 case ADDR_VEC:
915 case ADDR_DIFF_VEC:
916 return 1;
917
918 default:
919 break;
920 }
921
922 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
923 {
924 if (fmt[i] == 'e')
925 {
926 /* If we are about to do the last recursive call needed at this
927 level, change it into iteration. This function is called enough
928 to be worth it. */
929 if (i == 0)
930 return oprs_unchanged_p (XEXP (x, i), insn, avail_p);
931
932 else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
933 return 0;
934 }
935 else if (fmt[i] == 'E')
936 for (j = 0; j < XVECLEN (x, i); j++)
937 if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
938 return 0;
939 }
940
941 return 1;
942 }
943
944 /* Info passed from load_killed_in_block_p to mems_conflict_for_gcse_p. */
945
946 struct mem_conflict_info
947 {
948 /* A memory reference for a load instruction, mems_conflict_for_gcse_p will
949 see if a memory store conflicts with this memory load. */
950 const_rtx mem;
951
952 /* True if mems_conflict_for_gcse_p finds a conflict between two memory
953 references. */
954 bool conflict;
955 };
956
957 /* DEST is the output of an instruction. If it is a memory reference and
958 possibly conflicts with the load found in DATA, then communicate this
959 information back through DATA. */
960
961 static void
962 mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
963 void *data)
964 {
965 struct mem_conflict_info *mci = (struct mem_conflict_info *) data;
966
967 while (GET_CODE (dest) == SUBREG
968 || GET_CODE (dest) == ZERO_EXTRACT
969 || GET_CODE (dest) == STRICT_LOW_PART)
970 dest = XEXP (dest, 0);
971
972 /* If DEST is not a MEM, then it will not conflict with the load. Note
973 that function calls are assumed to clobber memory, but are handled
974 elsewhere. */
975 if (! MEM_P (dest))
976 return;
977
978 /* If we are setting a MEM in our list of specially recognized MEMs,
979 don't mark as killed this time. */
980 if (pre_ldst_mems != NULL && expr_equiv_p (dest, mci->mem))
981 {
982 if (!find_rtx_in_ldst (dest))
983 mci->conflict = true;
984 return;
985 }
986
987 if (true_dependence (dest, GET_MODE (dest), mci->mem))
988 mci->conflict = true;
989 }
990
991 /* Return nonzero if the expression in X (a memory reference) is killed
992 in block BB before or after the insn with the LUID in UID_LIMIT.
993 AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
994 before UID_LIMIT.
995
996 To check the entire block, set UID_LIMIT to max_uid + 1 and
997 AVAIL_P to 0. */
998
999 static int
1000 load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
1001 int avail_p)
1002 {
1003 vec<rtx> list = modify_mem_list[bb->index];
1004 rtx setter;
1005 unsigned ix;
1006
1007 /* If this is a readonly MEM then we aren't going to be changing it. */
1008 if (MEM_READONLY_P (x))
1009 return 0;
1010
1011 FOR_EACH_VEC_ELT_REVERSE (list, ix, setter)
1012 {
1013 struct mem_conflict_info mci;
1014
1015 /* Ignore entries in the list that do not apply. */
1016 if ((avail_p
1017 && DF_INSN_LUID (setter) < uid_limit)
1018 || (! avail_p
1019 && DF_INSN_LUID (setter) > uid_limit))
1020 continue;
1021
1022 /* If SETTER is a call everything is clobbered. Note that calls
1023 to pure functions are never put on the list, so we need not
1024 worry about them. */
1025 if (CALL_P (setter))
1026 return 1;
1027
1028 /* SETTER must be an INSN of some kind that sets memory. Call
1029 note_stores to examine each hunk of memory that is modified. */
1030 mci.mem = x;
1031 mci.conflict = false;
1032 note_stores (PATTERN (setter), mems_conflict_for_gcse_p, &mci);
1033 if (mci.conflict)
1034 return 1;
1035 }
1036 return 0;
1037 }
1038
1039 /* Return nonzero if the operands of expression X are unchanged from
1040 the start of INSN's basic block up to but not including INSN. */
1041
1042 static int
1043 oprs_anticipatable_p (const_rtx x, const_rtx insn)
1044 {
1045 return oprs_unchanged_p (x, insn, 0);
1046 }
1047
1048 /* Return nonzero if the operands of expression X are unchanged from
1049 INSN to the end of INSN's basic block. */
1050
1051 static int
1052 oprs_available_p (const_rtx x, const_rtx insn)
1053 {
1054 return oprs_unchanged_p (x, insn, 1);
1055 }
1056
1057 /* Hash expression X.
1058
1059 MODE is only used if X is a CONST_INT. DO_NOT_RECORD_P is a boolean
1060 indicating if a volatile operand is found or if the expression contains
1061 something we don't want to insert in the table. HASH_TABLE_SIZE is
1062 the current size of the hash table to be probed. */
1063
1064 static unsigned int
1065 hash_expr (const_rtx x, enum machine_mode mode, int *do_not_record_p,
1066 int hash_table_size)
1067 {
1068 unsigned int hash;
1069
1070 *do_not_record_p = 0;
1071
1072 hash = hash_rtx (x, mode, do_not_record_p, NULL, /*have_reg_qty=*/false);
1073 return hash % hash_table_size;
1074 }
1075
1076 /* Return nonzero if exp1 is equivalent to exp2. */
1077
1078 static int
1079 expr_equiv_p (const_rtx x, const_rtx y)
1080 {
1081 return exp_equiv_p (x, y, 0, true);
1082 }
1083
1084 /* Insert expression X in INSN in the hash TABLE.
1085 If it is already present, record it as the last occurrence in INSN's
1086 basic block.
1087
1088 MODE is the mode of the value X is being stored into.
1089 It is only used if X is a CONST_INT.
1090
1091 ANTIC_P is nonzero if X is an anticipatable expression.
1092 AVAIL_P is nonzero if X is an available expression.
1093
1094 MAX_DISTANCE is the maximum distance in instructions this expression can
1095 be moved. */
1096
1097 static void
1098 insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
1099 int avail_p, int max_distance, struct hash_table_d *table)
1100 {
1101 int found, do_not_record_p;
1102 unsigned int hash;
1103 struct expr *cur_expr, *last_expr = NULL;
1104 struct occr *antic_occr, *avail_occr;
1105
1106 hash = hash_expr (x, mode, &do_not_record_p, table->size);
1107
1108 /* Do not insert expression in table if it contains volatile operands,
1109 or if hash_expr determines the expression is something we don't want
1110 to or can't handle. */
1111 if (do_not_record_p)
1112 return;
1113
1114 cur_expr = table->table[hash];
1115 found = 0;
1116
1117 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1118 {
1119 /* If the expression isn't found, save a pointer to the end of
1120 the list. */
1121 last_expr = cur_expr;
1122 cur_expr = cur_expr->next_same_hash;
1123 }
1124
1125 if (! found)
1126 {
1127 cur_expr = GOBNEW (struct expr);
1128 bytes_used += sizeof (struct expr);
1129 if (table->table[hash] == NULL)
1130 /* This is the first pattern that hashed to this index. */
1131 table->table[hash] = cur_expr;
1132 else
1133 /* Add EXPR to end of this hash chain. */
1134 last_expr->next_same_hash = cur_expr;
1135
1136 /* Set the fields of the expr element. */
1137 cur_expr->expr = x;
1138 cur_expr->bitmap_index = table->n_elems++;
1139 cur_expr->next_same_hash = NULL;
1140 cur_expr->antic_occr = NULL;
1141 cur_expr->avail_occr = NULL;
1142 gcc_assert (max_distance >= 0);
1143 cur_expr->max_distance = max_distance;
1144 }
1145 else
1146 gcc_assert (cur_expr->max_distance == max_distance);
1147
1148 /* Now record the occurrence(s). */
1149 if (antic_p)
1150 {
1151 antic_occr = cur_expr->antic_occr;
1152
1153 if (antic_occr
1154 && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
1155 antic_occr = NULL;
1156
1157 if (antic_occr)
1158 /* Found another instance of the expression in the same basic block.
1159 Prefer the currently recorded one. We want the first one in the
1160 block and the block is scanned from start to end. */
1161 ; /* nothing to do */
1162 else
1163 {
1164 /* First occurrence of this expression in this basic block. */
1165 antic_occr = GOBNEW (struct occr);
1166 bytes_used += sizeof (struct occr);
1167 antic_occr->insn = insn;
1168 antic_occr->next = cur_expr->antic_occr;
1169 antic_occr->deleted_p = 0;
1170 cur_expr->antic_occr = antic_occr;
1171 }
1172 }
1173
1174 if (avail_p)
1175 {
1176 avail_occr = cur_expr->avail_occr;
1177
1178 if (avail_occr
1179 && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
1180 {
1181 /* Found another instance of the expression in the same basic block.
1182 Prefer this occurrence to the currently recorded one. We want
1183 the last one in the block and the block is scanned from start
1184 to end. */
1185 avail_occr->insn = insn;
1186 }
1187 else
1188 {
1189 /* First occurrence of this expression in this basic block. */
1190 avail_occr = GOBNEW (struct occr);
1191 bytes_used += sizeof (struct occr);
1192 avail_occr->insn = insn;
1193 avail_occr->next = cur_expr->avail_occr;
1194 avail_occr->deleted_p = 0;
1195 cur_expr->avail_occr = avail_occr;
1196 }
1197 }
1198 }
1199
1200 /* Scan SET present in INSN and add an entry to the hash TABLE. */
1201
1202 static void
1203 hash_scan_set (rtx set, rtx insn, struct hash_table_d *table)
1204 {
1205 rtx src = SET_SRC (set);
1206 rtx dest = SET_DEST (set);
1207 rtx note;
1208
1209 if (GET_CODE (src) == CALL)
1210 hash_scan_call (src, insn, table);
1211
1212 else if (REG_P (dest))
1213 {
1214 unsigned int regno = REGNO (dest);
1215 int max_distance = 0;
1216
1217 /* See if a REG_EQUAL note shows this equivalent to a simpler expression.
1218
1219 This allows us to do a single GCSE pass and still eliminate
1220 redundant constants, addresses or other expressions that are
1221 constructed with multiple instructions.
1222
1223 However, keep the original SRC if INSN is a simple reg-reg move.
1224 In this case, there will almost always be a REG_EQUAL note on the
1225 insn that sets SRC. By recording the REG_EQUAL value here as SRC
1226 for INSN, we miss copy propagation opportunities and we perform the
1227 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
1228 do more than one PRE GCSE pass.
1229
1230 Note that this does not impede profitable constant propagations. We
1231 "look through" reg-reg sets in lookup_avail_set. */
1232 note = find_reg_equal_equiv_note (insn);
1233 if (note != 0
1234 && REG_NOTE_KIND (note) == REG_EQUAL
1235 && !REG_P (src)
1236 && want_to_gcse_p (XEXP (note, 0), NULL))
1237 src = XEXP (note, 0), set = gen_rtx_SET (VOIDmode, dest, src);
1238
1239 /* Only record sets of pseudo-regs in the hash table. */
1240 if (regno >= FIRST_PSEUDO_REGISTER
1241 /* Don't GCSE something if we can't do a reg/reg copy. */
1242 && can_copy_p (GET_MODE (dest))
1243 /* GCSE commonly inserts instructions after the insn. We can't
1244 do that easily for EH edges so disable GCSE on these for now. */
1245 /* ??? We can now easily create new EH landing pads at the
1246 gimple level, for splitting edges; there's no reason we
1247 can't do the same thing at the rtl level. */
1248 && !can_throw_internal (insn)
1249 /* Is SET_SRC something we want to gcse? */
1250 && want_to_gcse_p (src, &max_distance)
1251 /* Don't CSE a nop. */
1252 && ! set_noop_p (set)
1253 /* Don't GCSE if it has attached REG_EQUIV note.
1254 At this point only function parameters should have
1255 REG_EQUIV notes, and if the argument slot is used somewhere
1256 explicitly, it means the address of the parameter has been taken,
1257 so we should not extend the lifetime of the pseudo. */
1258 && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
1259 {
1260 /* An expression is not anticipatable if its operands are
1261 modified before this insn or if this is not the only SET in
1262 this insn. The latter condition does not have to mean that
1263 SRC itself is not anticipatable, but we just will not be
1264 able to handle code motion of insns with multiple sets. */
1265 int antic_p = oprs_anticipatable_p (src, insn)
1266 && !multiple_sets (insn);
1267 /* An expression is not available if its operands are
1268 subsequently modified, including this insn. It's also not
1269 available if this is a branch, because we can't insert
1270 a set after the branch. */
1271 int avail_p = (oprs_available_p (src, insn)
1272 && ! JUMP_P (insn));
1273
1274 insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
1275 max_distance, table);
1276 }
1277 }
1278 /* In the case of a store, we want to consider the memory value as
1279 available in the REG stored in that memory. This makes it possible to
1280 remove redundant loads due to stores to the same location. */
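  /* A sketch of what -fgcse-las enables (registers hypothetical):

	*p = r1;        ==>    *p = r1;
	r2 = *p;               r2 = r1;

     The load is treated as a redundant occurrence of the value made
     available by the store.  */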
1281 else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
1282 {
1283 unsigned int regno = REGNO (src);
1284 int max_distance = 0;
1285
1286 /* Only record sets of pseudo-regs in the hash table. */
1287 if (regno >= FIRST_PSEUDO_REGISTER
1288 /* Don't GCSE something if we can't do a reg/reg copy. */
1289 && can_copy_p (GET_MODE (src))
1290 /* GCSE commonly inserts instructions after the insn. We can't
1291 do that easily for EH edges so disable GCSE on these for now. */
1292 && !can_throw_internal (insn)
1293 /* Is SET_DEST something we want to gcse? */
1294 && want_to_gcse_p (dest, &max_distance)
1295 /* Don't CSE a nop. */
1296 && ! set_noop_p (set)
1297 /* Don't GCSE if it has attached REG_EQUIV note.
1298 At this point only function parameters should have
1299 REG_EQUIV notes, and if the argument slot is used somewhere
1300 explicitly, it means the address of the parameter has been taken,
1301 so we should not extend the lifetime of the pseudo. */
1302 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
1303 || ! MEM_P (XEXP (note, 0))))
1304 {
1305 /* Stores are never anticipatable. */
1306 int antic_p = 0;
1307 /* An expression is not available if its operands are
1308 subsequently modified, including this insn. It's also not
1309 available if this is a branch, because we can't insert
1310 a set after the branch. */
1311 int avail_p = oprs_available_p (dest, insn)
1312 && ! JUMP_P (insn);
1313
1314 /* Record the memory expression (DEST) in the hash table. */
1315 insert_expr_in_table (dest, GET_MODE (dest), insn,
1316 antic_p, avail_p, max_distance, table);
1317 }
1318 }
1319 }
1320
1321 static void
1322 hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
1323 struct hash_table_d *table ATTRIBUTE_UNUSED)
1324 {
1325 /* Currently nothing to do. */
1326 }
1327
1328 static void
1329 hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
1330 struct hash_table_d *table ATTRIBUTE_UNUSED)
1331 {
1332 /* Currently nothing to do. */
1333 }
1334
1335 /* Process INSN and add hash table entries as appropriate. */
1336
1337 static void
1338 hash_scan_insn (rtx insn, struct hash_table_d *table)
1339 {
1340 rtx pat = PATTERN (insn);
1341 int i;
1342
1343 /* Pick out the sets of INSN and for other forms of instructions record
1344 what's been modified. */
1345
1346 if (GET_CODE (pat) == SET)
1347 hash_scan_set (pat, insn, table);
1348
1349 else if (GET_CODE (pat) == CLOBBER)
1350 hash_scan_clobber (pat, insn, table);
1351
1352 else if (GET_CODE (pat) == CALL)
1353 hash_scan_call (pat, insn, table);
1354
1355 else if (GET_CODE (pat) == PARALLEL)
1356 for (i = 0; i < XVECLEN (pat, 0); i++)
1357 {
1358 rtx x = XVECEXP (pat, 0, i);
1359
1360 if (GET_CODE (x) == SET)
1361 hash_scan_set (x, insn, table);
1362 else if (GET_CODE (x) == CLOBBER)
1363 hash_scan_clobber (x, insn, table);
1364 else if (GET_CODE (x) == CALL)
1365 hash_scan_call (x, insn, table);
1366 }
1367 }
1368
1369 /* Dump the hash table TABLE to file FILE under the name NAME. */
1370
1371 static void
1372 dump_hash_table (FILE *file, const char *name, struct hash_table_d *table)
1373 {
1374 int i;
1375 /* Flattened out table, so it's printed in proper order. */
1376 struct expr **flat_table;
1377 unsigned int *hash_val;
1378 struct expr *expr;
1379
1380 flat_table = XCNEWVEC (struct expr *, table->n_elems);
1381 hash_val = XNEWVEC (unsigned int, table->n_elems);
1382
1383 for (i = 0; i < (int) table->size; i++)
1384 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
1385 {
1386 flat_table[expr->bitmap_index] = expr;
1387 hash_val[expr->bitmap_index] = i;
1388 }
1389
1390 fprintf (file, "%s hash table (%d buckets, %d entries)\n",
1391 name, table->size, table->n_elems);
1392
1393 for (i = 0; i < (int) table->n_elems; i++)
1394 if (flat_table[i] != 0)
1395 {
1396 expr = flat_table[i];
1397 fprintf (file, "Index %d (hash value %d; max distance %d)\n ",
1398 expr->bitmap_index, hash_val[i], expr->max_distance);
1399 print_rtl (file, expr->expr);
1400 fprintf (file, "\n");
1401 }
1402
1403 fprintf (file, "\n");
1404
1405 free (flat_table);
1406 free (hash_val);
1407 }
1408
1409 /* Record register first/last/block set information for REGNO in INSN.
1410
1411 first_set records the first place in the block where the register
1412 is set and is used to compute "anticipatability".
1413
1414 last_set records the last place in the block where the register
1415 is set and is used to compute "availability".
1416
1417 last_bb records the block for which first_set and last_set are
1418 valid, as a quick test to invalidate them. */
1419
1420 static void
1421 record_last_reg_set_info (rtx insn, int regno)
1422 {
1423 struct reg_avail_info *info = &reg_avail_info[regno];
1424 int luid = DF_INSN_LUID (insn);
1425
1426 info->last_set = luid;
1427 if (info->last_bb != current_bb)
1428 {
1429 info->last_bb = current_bb;
1430 info->first_set = luid;
1431 }
1432 }
1433
1434 /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
1435 Note we store a pair (the MEM and its canonicalized address) in
1436 each list entry. */
1437
1438 static void
1439 canon_list_insert (rtx dest ATTRIBUTE_UNUSED, const_rtx x ATTRIBUTE_UNUSED,
1440 void * v_insn)
1441 {
1442 rtx dest_addr, insn;
1443 int bb;
1444 modify_pair pair;
1445
1446 while (GET_CODE (dest) == SUBREG
1447 || GET_CODE (dest) == ZERO_EXTRACT
1448 || GET_CODE (dest) == STRICT_LOW_PART)
1449 dest = XEXP (dest, 0);
1450
1451 /* If DEST is not a MEM, then it will not conflict with a load. Note
1452 that function calls are assumed to clobber memory, but are handled
1453 elsewhere. */
1454
1455 if (! MEM_P (dest))
1456 return;
1457
1458 dest_addr = get_addr (XEXP (dest, 0));
1459 dest_addr = canon_rtx (dest_addr);
1460 insn = (rtx) v_insn;
1461 bb = BLOCK_FOR_INSN (insn)->index;
1462
1463 pair.dest = dest;
1464 pair.dest_addr = dest_addr;
1465 canon_modify_mem_list[bb].safe_push (pair);
1466 }
1467
1468 /* Record memory modification information for INSN. We do not actually care
1469 about the memory location(s) that are set, or even how they are set (consider
1470 a CALL_INSN). We merely need to record which insns modify memory. */
1471
1472 static void
1473 record_last_mem_set_info (rtx insn)
1474 {
1475 int bb;
1476
1477 if (! flag_gcse_lm)
1478 return;
1479
1480 /* load_killed_in_block_p will handle the case of calls clobbering
1481 everything. */
1482 bb = BLOCK_FOR_INSN (insn)->index;
1483 modify_mem_list[bb].safe_push (insn);
1484 bitmap_set_bit (modify_mem_list_set, bb);
1485
1486 if (CALL_P (insn))
1487 bitmap_set_bit (blocks_with_calls, bb);
1488 else
1489 note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
1490 }
1491
1492 /* Called from compute_hash_table via note_stores to handle one
1493 SET or CLOBBER in an insn. DATA is really the instruction in which
1494 the SET is taking place. */
1495
1496 static void
1497 record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
1498 {
1499 rtx last_set_insn = (rtx) data;
1500
1501 if (GET_CODE (dest) == SUBREG)
1502 dest = SUBREG_REG (dest);
1503
1504 if (REG_P (dest))
1505 record_last_reg_set_info (last_set_insn, REGNO (dest));
1506 else if (MEM_P (dest)
1507 /* Ignore pushes, they clobber nothing. */
1508 && ! push_operand (dest, GET_MODE (dest)))
1509 record_last_mem_set_info (last_set_insn);
1510 }
1511
1512 /* Top level function to create an expression hash table.
1513
1514 Expression entries are placed in the hash table if
1515 - they are of the form (set (pseudo-reg) src),
1516 - src is something we want to perform GCSE on,
1517 - none of the operands are subsequently modified in the block
1518
1519 Currently src must be a pseudo-reg or a const_int.
1520
1521 TABLE is the table computed. */
1522
1523 static void
1524 compute_hash_table_work (struct hash_table_d *table)
1525 {
1526 int i;
1527
1528 /* Clear out anything left over from a previous pass. */
1529 clear_modify_mem_tables ();
1530 /* Some working arrays used to track first and last set in each block. */
1531 reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());
1532
1533 for (i = 0; i < max_reg_num (); ++i)
1534 reg_avail_info[i].last_bb = NULL;
1535
1536 FOR_EACH_BB (current_bb)
1537 {
1538 rtx insn;
1539 unsigned int regno;
1540
1541 /* First pass over the instructions records information used to
1542 determine when registers and memory are first and last set. */
1543 FOR_BB_INSNS (current_bb, insn)
1544 {
1545 if (!NONDEBUG_INSN_P (insn))
1546 continue;
1547
1548 if (CALL_P (insn))
1549 {
1550 hard_reg_set_iterator hrsi;
1551 EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call,
1552 0, regno, hrsi)
1553 record_last_reg_set_info (insn, regno);
1554
1555 if (! RTL_CONST_OR_PURE_CALL_P (insn))
1556 record_last_mem_set_info (insn);
1557 }
1558
1559 note_stores (PATTERN (insn), record_last_set_info, insn);
1560 }
1561
1562 /* The next pass builds the hash table. */
1563 FOR_BB_INSNS (current_bb, insn)
1564 if (NONDEBUG_INSN_P (insn))
1565 hash_scan_insn (insn, table);
1566 }
1567
1568 free (reg_avail_info);
1569 reg_avail_info = NULL;
1570 }
1571
1572 /* Allocate space for the set/expr hash TABLE.
1573 The function's insn count determines the number of buckets to use. */
1574
1575 static void
1576 alloc_hash_table (struct hash_table_d *table)
1577 {
1578 int n;
1579
1580 n = get_max_insn_count ();
1581
1582 table->size = n / 4;
1583 if (table->size < 11)
1584 table->size = 11;
1585
1586 /* Attempt to maintain efficient use of hash table.
1587 Making it an odd number is simplest for now.
1588 ??? Later take some measurements. */
1589 table->size |= 1;
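  /* For example, a function with 1000 insns gets 1000 / 4 = 250
     buckets, bumped to the odd number 251.  */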
1590 n = table->size * sizeof (struct expr *);
1591 table->table = GNEWVAR (struct expr *, n);
1592 }
1593
1594 /* Free things allocated by alloc_hash_table. */
1595
1596 static void
1597 free_hash_table (struct hash_table_d *table)
1598 {
1599 free (table->table);
1600 }
1601
1602 /* Compute the expression hash table TABLE. */
1603
1604 static void
1605 compute_hash_table (struct hash_table_d *table)
1606 {
1607 /* Initialize count of number of entries in hash table. */
1608 table->n_elems = 0;
1609 memset (table->table, 0, table->size * sizeof (struct expr *));
1610
1611 compute_hash_table_work (table);
1612 }
1613 \f
1614 /* Expression tracking support. */
1615
1616 /* Clear canon_modify_mem_list and modify_mem_list tables. */
1617 static void
1618 clear_modify_mem_tables (void)
1619 {
1620 unsigned i;
1621 bitmap_iterator bi;
1622
1623 EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
1624 {
1625 modify_mem_list[i].release ();
1626 canon_modify_mem_list[i].release ();
1627 }
1628 bitmap_clear (modify_mem_list_set);
1629 bitmap_clear (blocks_with_calls);
1630 }
1631
1632 /* Release memory used by modify_mem_list_set. */
1633
1634 static void
1635 free_modify_mem_tables (void)
1636 {
1637 clear_modify_mem_tables ();
1638 free (modify_mem_list);
1639 free (canon_modify_mem_list);
1640 modify_mem_list = 0;
1641 canon_modify_mem_list = 0;
1642 }
1643 \f
1644 /* For each block, compute whether X is transparent. X is either an
1645 expression or an assignment [though we don't care which, for this context
1646 an assignment is treated as an expression]. For each block where an
1647 element of X is modified, reset the INDX bit in BMAP. */
1648
1649 static void
1650 compute_transp (const_rtx x, int indx, sbitmap *bmap)
1651 {
1652 int i, j;
1653 enum rtx_code code;
1654 const char *fmt;
1655
1656 /* repeat is used to turn tail-recursion into iteration since GCC
1657 can't do it when there's no return value. */
1658 repeat:
1659
1660 if (x == 0)
1661 return;
1662
1663 code = GET_CODE (x);
1664 switch (code)
1665 {
1666 case REG:
1667 {
1668 df_ref def;
1669 for (def = DF_REG_DEF_CHAIN (REGNO (x));
1670 def;
1671 def = DF_REF_NEXT_REG (def))
1672 bitmap_clear_bit (bmap[DF_REF_BB (def)->index], indx);
1673 }
1674
1675 return;
1676
1677 case MEM:
1678 if (! MEM_READONLY_P (x))
1679 {
1680 bitmap_iterator bi;
1681 unsigned bb_index;
1682 rtx x_addr;
1683
1684 x_addr = get_addr (XEXP (x, 0));
1685 x_addr = canon_rtx (x_addr);
1686
1687 /* First handle all the blocks with calls. We don't need to
1688 do any list walking for them. */
1689 EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
1690 {
1691 bitmap_clear_bit (bmap[bb_index], indx);
1692 }
1693
1694 /* Now iterate over the blocks which have memory modifications
1695 but which do not have any calls. */
1696 EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
1697 blocks_with_calls,
1698 0, bb_index, bi)
1699 {
1700 vec<modify_pair> list
1701 = canon_modify_mem_list[bb_index];
1702 modify_pair *pair;
1703 unsigned ix;
1704
1705 FOR_EACH_VEC_ELT_REVERSE (list, ix, pair)
1706 {
1707 rtx dest = pair->dest;
1708 rtx dest_addr = pair->dest_addr;
1709
1710 if (canon_true_dependence (dest, GET_MODE (dest),
1711 dest_addr, x, x_addr))
1712 bitmap_clear_bit (bmap[bb_index], indx);
1713 }
1714 }
1715 }
1716
1717 x = XEXP (x, 0);
1718 goto repeat;
1719
1720 case PC:
1721 case CC0: /*FIXME*/
1722 case CONST:
1723 CASE_CONST_ANY:
1724 case SYMBOL_REF:
1725 case LABEL_REF:
1726 case ADDR_VEC:
1727 case ADDR_DIFF_VEC:
1728 return;
1729
1730 default:
1731 break;
1732 }
1733
1734 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1735 {
1736 if (fmt[i] == 'e')
1737 {
1738 /* If we are about to do the last recursive call
1739 needed at this level, change it into iteration.
1740 This function is called enough to be worth it. */
1741 if (i == 0)
1742 {
1743 x = XEXP (x, i);
1744 goto repeat;
1745 }
1746
1747 compute_transp (XEXP (x, i), indx, bmap);
1748 }
1749 else if (fmt[i] == 'E')
1750 for (j = 0; j < XVECLEN (x, i); j++)
1751 compute_transp (XVECEXP (x, i, j), indx, bmap);
1752 }
1753 }
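
/* A rough illustration of transparency (invented fragment, not from
   the sources): the expression a+b is transparent in a block only if
   the block modifies neither A nor B, and for a MEM only if no call
   or possibly-aliasing store intervenes.  E.g. in

     t1 = a + b;
     a = c;          <- def of A: a+b is not transparent in this block
     t2 = a + b;

   the def of A clears the TRANSP bit for a+b in its block, which is
   what the DF def-chain walk above does for each REG operand.  */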
1754 \f
1755 /* Compute PRE+LCM working variables. */
1756
1757 /* Local properties of expressions. */
1758
1759 /* Nonzero for expressions that are transparent in the block. */
1760 static sbitmap *transp;
1761
1762 /* Nonzero for expressions that are computed (available) in the block. */
1763 static sbitmap *comp;
1764
1765 /* Nonzero for expressions that are locally anticipatable in the block. */
1766 static sbitmap *antloc;
1767
1768 /* Nonzero for expressions where this block is an optimal computation
1769 point. */
1770 static sbitmap *pre_optimal;
1771
1772 /* Nonzero for expressions which are redundant in a particular block. */
1773 static sbitmap *pre_redundant;
1774
1775 /* Nonzero for expressions which should be inserted on a specific edge. */
1776 static sbitmap *pre_insert_map;
1777
1778 /* Nonzero for expressions which should be deleted in a specific block. */
1779 static sbitmap *pre_delete_map;
1780
1781 /* Allocate vars used for PRE analysis. */
1782
1783 static void
1784 alloc_pre_mem (int n_blocks, int n_exprs)
1785 {
1786 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
1787 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
1788 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
1789
1790 pre_optimal = NULL;
1791 pre_redundant = NULL;
1792 pre_insert_map = NULL;
1793 pre_delete_map = NULL;
1794 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
1795
1796 /* pre_insert and pre_delete are allocated later. */
1797 }
1798
1799 /* Free vars used for PRE analysis. */
1800
1801 static void
1802 free_pre_mem (void)
1803 {
1804 sbitmap_vector_free (transp);
1805 sbitmap_vector_free (comp);
1806
1807 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
1808
1809 if (pre_optimal)
1810 sbitmap_vector_free (pre_optimal);
1811 if (pre_redundant)
1812 sbitmap_vector_free (pre_redundant);
1813 if (pre_insert_map)
1814 sbitmap_vector_free (pre_insert_map);
1815 if (pre_delete_map)
1816 sbitmap_vector_free (pre_delete_map);
1817
1818 transp = comp = NULL;
1819 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
1820 }
1821
1822 /* Remove certain expressions from anticipatable and transparent
1823 sets of basic blocks that have incoming abnormal edge.
1824 For PRE remove potentially trapping expressions to avoid placing
1825 them on abnormal edges. For hoisting remove memory references that
1826 can be clobbered by calls. */
1827
1828 static void
1829 prune_expressions (bool pre_p)
1830 {
1831 sbitmap prune_exprs;
1832 struct expr *expr;
1833 unsigned int ui;
1834 basic_block bb;
1835
1836 prune_exprs = sbitmap_alloc (expr_hash_table.n_elems);
1837 bitmap_clear (prune_exprs);
1838 for (ui = 0; ui < expr_hash_table.size; ui++)
1839 {
1840 for (expr = expr_hash_table.table[ui]; expr; expr = expr->next_same_hash)
1841 {
1842 /* Note potentially trapping expressions. */
1843 if (may_trap_p (expr->expr))
1844 {
1845 bitmap_set_bit (prune_exprs, expr->bitmap_index);
1846 continue;
1847 }
1848
1849 if (!pre_p && MEM_P (expr->expr))
1850 /* Note memory references that can be clobbered by a call.
1851 We do not split abnormal edges in hoisting, so if a
1852 memory reference were hoisted along an abnormal edge,
1853 it would be placed /before/ the call. Therefore, only
1854 constant memory references can be hoisted along abnormal
1855 edges. */
1856 {
1857 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
1858 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
1859 continue;
1860
1861 if (MEM_READONLY_P (expr->expr)
1862 && !MEM_VOLATILE_P (expr->expr)
1863 && MEM_NOTRAP_P (expr->expr))
1864 /* Constant memory reference, e.g., a PIC address. */
1865 continue;
1866
1867 /* ??? Optimally, we would use interprocedural alias
1868 analysis to determine if this mem is actually killed
1869 by this call. */
1870
1871 bitmap_set_bit (prune_exprs, expr->bitmap_index);
1872 }
1873 }
1874 }
1875
1876 FOR_EACH_BB (bb)
1877 {
1878 edge e;
1879 edge_iterator ei;
1880
1881 /* If the current block is the destination of an abnormal edge, we
1882 kill all trapping (for PRE) and memory (for hoist) expressions
1883 because we won't be able to properly place the instruction on
1884 the edge. So make them neither anticipatable nor transparent.
1885 This is fairly conservative.
1886
1887 ??? For hoisting it may be necessary to check for set-and-jump
1888 instructions here, not just for abnormal edges. The general problem
1889 is that when an expression cannot be placed right at the end of
1890 a basic block we should account for any side-effects of a subsequent
1891 jump instruction that could clobber the expression. It would
1892 be best to implement this check along the lines of
1893 should_hoist_expr_to_dom where the target block is already known
1894 and, hence, there's no need to conservatively prune expressions on
1895 "intermediate" set-and-jump instructions. */
1896 FOR_EACH_EDGE (e, ei, bb->preds)
1897 if ((e->flags & EDGE_ABNORMAL)
1898 && (pre_p || CALL_P (BB_END (e->src))))
1899 {
1900 bitmap_and_compl (antloc[bb->index],
1901 antloc[bb->index], prune_exprs);
1902 bitmap_and_compl (transp[bb->index],
1903 transp[bb->index], prune_exprs);
1904 break;
1905 }
1906 }
1907
1908 sbitmap_free (prune_exprs);
1909 }
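
/* Concrete example of the pruning above (illustrative only): a
   division X/Y may trap when Y is zero, so may_trap_p holds for it;
   if a block is entered by an abnormal edge, clearing the ANTLOC and
   TRANSP bits of X/Y there keeps PRE from materializing a possibly
   trapping "x / y" on a path where the source never divided.  */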
1910
1911 /* It may be necessary to insert a large number of insns on edges to
1912 make the existing occurrences of expressions fully redundant. This
1913 routine examines the set of insertions and deletions and if the ratio
1914 of insertions to deletions is too high for a particular expression, then
1915 the expression is removed from the insertion/deletion sets.
1916
1917 N_ELEMS is the number of elements in the hash table. */
1918
1919 static void
1920 prune_insertions_deletions (int n_elems)
1921 {
1922 sbitmap_iterator sbi;
1923 sbitmap prune_exprs;
1924
1925 /* We always use I to iterate over blocks/edges and J to iterate over
1926 expressions. */
1927 unsigned int i, j;
1928
1929 /* Counts for the number of times an expression needs to be inserted and
1930 number of times an expression can be removed as a result. */
1931 int *insertions = GCNEWVEC (int, n_elems);
1932 int *deletions = GCNEWVEC (int, n_elems);
1933
1934 /* Set of expressions which require too many insertions relative to
1935 the number of deletions achieved. We will prune these out of the
1936 insertion/deletion sets. */
1937 prune_exprs = sbitmap_alloc (n_elems);
1938 bitmap_clear (prune_exprs);
1939
1940 /* Iterate over the edges counting the number of times each expression
1941 needs to be inserted. */
1942 for (i = 0; i < (unsigned) n_edges; i++)
1943 {
1944 EXECUTE_IF_SET_IN_BITMAP (pre_insert_map[i], 0, j, sbi)
1945 insertions[j]++;
1946 }
1947
1948 /* Similarly for deletions, but those occur in blocks rather than on
1949 edges. */
1950 for (i = 0; i < (unsigned) last_basic_block; i++)
1951 {
1952 EXECUTE_IF_SET_IN_BITMAP (pre_delete_map[i], 0, j, sbi)
1953 deletions[j]++;
1954 }
1955
1956 /* Now that we have accurate counts, iterate over the elements in the
1957 hash table and see if any need too many insertions relative to the
1958 number of evaluations that can be removed. If so, mark them in
1959 PRUNE_EXPRS. */
1960 for (j = 0; j < (unsigned) n_elems; j++)
1961 if (deletions[j]
1962 && ((unsigned) insertions[j] / deletions[j]) > MAX_GCSE_INSERTION_RATIO)
1963 bitmap_set_bit (prune_exprs, j);
1964
1965 /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS. */
1966 EXECUTE_IF_SET_IN_BITMAP (prune_exprs, 0, j, sbi)
1967 {
1968 for (i = 0; i < (unsigned) n_edges; i++)
1969 bitmap_clear_bit (pre_insert_map[i], j);
1970
1971 for (i = 0; i < (unsigned) last_basic_block; i++)
1972 bitmap_clear_bit (pre_delete_map[i], j);
1973 }
1974
1975 sbitmap_free (prune_exprs);
1976 free (insertions);
1977 free (deletions);
1978 }
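
/* Worked example with invented counts: suppose expression J needs
   insertions on 64 edges (insertions[J] == 64) to make just two
   occurrences redundant (deletions[J] == 2).  The ratio 64/2 == 32
   exceeds a MAX_GCSE_INSERTION_RATIO of, say, 20, so bit J is set in
   PRUNE_EXPRS and J is then cleared from every PRE_INSERT_MAP[edge]
   and PRE_DELETE_MAP[block] entry.  */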
1979
1980 /* Top level routine to do the dataflow analysis needed by PRE. */
1981
1982 static struct edge_list *
1983 compute_pre_data (void)
1984 {
1985 struct edge_list *edge_list;
1986 basic_block bb;
1987
1988 compute_local_properties (transp, comp, antloc, &expr_hash_table);
1989 prune_expressions (true);
1990 bitmap_vector_clear (ae_kill, last_basic_block);
1991
1992 /* Compute ae_kill for each basic block using:
1993
1994 ~(TRANSP | COMP)
1995 */
1996
1997 FOR_EACH_BB (bb)
1998 {
1999 bitmap_ior (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
2000 bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
2001 }
2002
2003 edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
2004 ae_kill, &pre_insert_map, &pre_delete_map);
2005 sbitmap_vector_free (antloc);
2006 antloc = NULL;
2007 sbitmap_vector_free (ae_kill);
2008 ae_kill = NULL;
2009
2010 prune_insertions_deletions (expr_hash_table.n_elems);
2011
2012 return edge_list;
2013 }
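
/* To recap the sets handed to pre_edge_lcm above (summary only):

     ANTLOC[b]  - expressions locally anticipatable in block B
     COMP[b]    - expressions computed and available at the end of B
     TRANSP[b]  - expressions whose operands B leaves untouched
     AE_KILL[b] = ~(TRANSP[b] | COMP[b])

   LCM hands back PRE_INSERT_MAP indexed by edge and PRE_DELETE_MAP
   indexed by block; those two maps drive pre_edge_insert and
   pre_delete below.  */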
2014 \f
2015 /* PRE utilities */
2016
2017 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
2018 block BB.
2019
2020 VISITED is a pointer to a working buffer for tracking which BB's have
2021 been visited. It is NULL for the top-level call.
2022
2023 We treat reaching expressions that go through blocks containing the same
2024 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
2025 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
2026 2 as not reaching. The intent is to improve the probability of finding
2027 only one reaching expression and to reduce register lifetimes by picking
2028 the closest such expression. */
2029
2030 static int
2031 pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr,
2032 basic_block bb, char *visited)
2033 {
2034 edge pred;
2035 edge_iterator ei;
2036
2037 FOR_EACH_EDGE (pred, ei, bb->preds)
2038 {
2039 basic_block pred_bb = pred->src;
2040
2041 if (pred->src == ENTRY_BLOCK_PTR
2042 /* Has this predecessor already been visited? */
2043 || visited[pred_bb->index])
2044 ;/* Nothing to do. */
2045
2046 /* Does this predecessor generate this expression? */
2047 else if (bitmap_bit_p (comp[pred_bb->index], expr->bitmap_index))
2048 {
2049 /* Is this the occurrence we're looking for?
2050 Note that there's only one generating occurrence per block
2051 so we just need to check the block number. */
2052 if (occr_bb == pred_bb)
2053 return 1;
2054
2055 visited[pred_bb->index] = 1;
2056 }
2057 /* Ignore this predecessor if it kills the expression. */
2058 else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
2059 visited[pred_bb->index] = 1;
2060
2061 /* Neither gen nor kill. */
2062 else
2063 {
2064 visited[pred_bb->index] = 1;
2065 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
2066 return 1;
2067 }
2068 }
2069
2070 /* All paths have been checked. */
2071 return 0;
2072 }
2073
2074 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
2075 memory allocated for that function is returned. */
2076
2077 static int
2078 pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
2079 {
2080 int rval;
2081 char *visited = XCNEWVEC (char, last_basic_block);
2082
2083 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
2084
2085 free (visited);
2086 return rval;
2087 }
2088 \f
2089 /* Generate RTL to copy an EXPR to its `reaching_reg' and return it. */
2090
2091 static rtx
2092 process_insert_insn (struct expr *expr)
2093 {
2094 rtx reg = expr->reaching_reg;
2095 /* Copy the expression to make sure we don't have any sharing issues. */
2096 rtx exp = copy_rtx (expr->expr);
2097 rtx pat;
2098
2099 start_sequence ();
2100
2101 /* If the expression is something that's an operand, like a constant,
2102 just copy it to a register. */
2103 if (general_operand (exp, GET_MODE (reg)))
2104 emit_move_insn (reg, exp);
2105
2106 /* Otherwise, make a new insn to compute this expression and make sure the
2107 insn will be recognized (this also adds any needed CLOBBERs). */
2108 else
2109 {
2110 rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
2111
2112 if (insn_invalid_p (insn, false))
2113 gcc_unreachable ();
2114 }
2115
2116 pat = get_insns ();
2117 end_sequence ();
2118
2119 return pat;
2120 }
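
/* For example (a sketch; the CLOBBER details are target dependent):
   if EXPR->expr is (const_int 42), general_operand accepts it and a
   plain move into REG is emitted.  If it is (mult (reg A) (reg B)),
   a new SET is emitted and then recognized, which may add needed
   CLOBBERs, e.g. of a flags register, from the matching pattern.  */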
2121
2122 /* Add EXPR to the end of basic block BB.
2123
2124 This is used by both PRE and code hoisting.
2125
2126 static void
2127 insert_insn_end_basic_block (struct expr *expr, basic_block bb)
2128 {
2129 rtx insn = BB_END (bb);
2130 rtx new_insn;
2131 rtx reg = expr->reaching_reg;
2132 int regno = REGNO (reg);
2133 rtx pat, pat_end;
2134
2135 pat = process_insert_insn (expr);
2136 gcc_assert (pat && INSN_P (pat));
2137
2138 pat_end = pat;
2139 while (NEXT_INSN (pat_end) != NULL_RTX)
2140 pat_end = NEXT_INSN (pat_end);
2141
2142 /* If the last insn is a jump, insert EXPR in front [taking care to
2143 handle cc0, etc. properly]. Similarly we need to take care of
2144 trapping instructions in the presence of non-call exceptions. */
2145
2146 if (JUMP_P (insn)
2147 || (NONJUMP_INSN_P (insn)
2148 && (!single_succ_p (bb)
2149 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
2150 {
2151 #ifdef HAVE_cc0
2152 rtx note;
2153 #endif
2154
2155 /* If this is a jump table, then we can't insert stuff here. Since
2156 we know the previous real insn must be the tablejump, we insert
2157 the new instruction just before the tablejump. */
2158 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
2159 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
2160 insn = prev_active_insn (insn);
2161
2162 #ifdef HAVE_cc0
2163 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
2164 if cc0 isn't set. */
2165 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
2166 if (note)
2167 insn = XEXP (note, 0);
2168 else
2169 {
2170 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
2171 if (maybe_cc0_setter
2172 && INSN_P (maybe_cc0_setter)
2173 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
2174 insn = maybe_cc0_setter;
2175 }
2176 #endif
2177 /* FIXME: What if something in cc0/jump uses value set in new insn? */
2178 new_insn = emit_insn_before_noloc (pat, insn, bb);
2179 }
2180
2181 /* Likewise if the last insn is a call, as will happen in the presence
2182 of exception handling. */
2183 else if (CALL_P (insn)
2184 && (!single_succ_p (bb)
2185 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
2186 {
2187 /* Keeping in mind targets with small register classes and parameters
2188 in registers, we search backward and place the instructions before
2189 the first parameter is loaded. Do this on all targets for consistency,
2190 on the presumption that we'll get better code elsewhere as well. */
2191
2192 /* Since different machines initialize their parameter registers
2193 in different orders, assume nothing. Collect the set of all
2194 parameter registers. */
2195 insn = find_first_parameter_load (insn, BB_HEAD (bb));
2196
2197 /* If we found all the parameter loads, then we want to insert
2198 before the first parameter load.
2199
2200 If we did not find all the parameter loads, then we might have
2201 stopped on the head of the block, which could be a CODE_LABEL.
2202 If we inserted before the CODE_LABEL, then we would be putting
2203 the insn in the wrong basic block. In that case, put the insn
2204 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
2205 while (LABEL_P (insn)
2206 || NOTE_INSN_BASIC_BLOCK_P (insn))
2207 insn = NEXT_INSN (insn);
2208
2209 new_insn = emit_insn_before_noloc (pat, insn, bb);
2210 }
2211 else
2212 new_insn = emit_insn_after_noloc (pat, insn, bb);
2213
2214 while (1)
2215 {
2216 if (INSN_P (pat))
2217 add_label_notes (PATTERN (pat), new_insn);
2218 if (pat == pat_end)
2219 break;
2220 pat = NEXT_INSN (pat);
2221 }
2222
2223 gcse_create_count++;
2224
2225 if (dump_file)
2226 {
2227 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
2228 bb->index, INSN_UID (new_insn));
2229 fprintf (dump_file, "copying expression %d to reg %d\n",
2230 expr->bitmap_index, regno);
2231 }
2232 }
2233
2234 /* Insert partially redundant expressions on edges in the CFG to make
2235 the expressions fully redundant. */
2236
2237 static int
2238 pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
2239 {
2240 int e, i, j, num_edges, set_size, did_insert = 0;
2241 sbitmap *inserted;
2242
2243 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
2244 if it reaches any of the deleted expressions. */
2245
2246 set_size = pre_insert_map[0]->size;
2247 num_edges = NUM_EDGES (edge_list);
2248 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
2249 bitmap_vector_clear (inserted, num_edges);
2250
2251 for (e = 0; e < num_edges; e++)
2252 {
2253 int indx;
2254 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
2255
2256 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
2257 {
2258 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
2259
2260 for (j = indx;
2261 insert && j < (int) expr_hash_table.n_elems;
2262 j++, insert >>= 1)
2263 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
2264 {
2265 struct expr *expr = index_map[j];
2266 struct occr *occr;
2267
2268 /* Now look at each deleted occurrence of this expression. */
2269 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2270 {
2271 if (! occr->deleted_p)
2272 continue;
2273
2274 /* Insert this expression on this edge if it would
2275 reach the deleted occurrence in BB. */
2276 if (!bitmap_bit_p (inserted[e], j))
2277 {
2278 rtx insn;
2279 edge eg = INDEX_EDGE (edge_list, e);
2280
2281 /* We can't insert anything on an abnormal and
2282 critical edge, so we insert the insn at the end of
2283 the previous block. There are several alternatives
2284 detailed in Morgan's book, p. 277 (sec 10.5), for
2285 handling this situation. This one is easiest for
2286 now. */
2287
2288 if (eg->flags & EDGE_ABNORMAL)
2289 insert_insn_end_basic_block (index_map[j], bb);
2290 else
2291 {
2292 insn = process_insert_insn (index_map[j]);
2293 insert_insn_on_edge (insn, eg);
2294 }
2295
2296 if (dump_file)
2297 {
2298 fprintf (dump_file, "PRE: edge (%d,%d), ",
2299 bb->index,
2300 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
2301 fprintf (dump_file, "copy expression %d\n",
2302 expr->bitmap_index);
2303 }
2304
2305 update_ld_motion_stores (expr);
2306 bitmap_set_bit (inserted[e], j);
2307 did_insert = 1;
2308 gcse_create_count++;
2309 }
2310 }
2311 }
2312 }
2313 }
2314
2315 sbitmap_vector_free (inserted);
2316 return did_insert;
2317 }
2318
2319 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
2320 Given "old_reg <- expr" (INSN), instead of adding after it
2321 reaching_reg <- old_reg
2322 it's better to do the following:
2323 reaching_reg <- expr
2324 old_reg <- reaching_reg
2325 because this way copy propagation can discover additional PRE
2326 opportunities. But if this fails, we try the old way.
2327 When "expr" is a store, i.e.
2328 given "MEM <- old_reg", instead of adding after it
2329 reaching_reg <- old_reg
2330 it's better to add it before as follows:
2331 reaching_reg <- old_reg
2332 MEM <- reaching_reg. */
2333
2334 static void
2335 pre_insert_copy_insn (struct expr *expr, rtx insn)
2336 {
2337 rtx reg = expr->reaching_reg;
2338 int regno = REGNO (reg);
2339 int indx = expr->bitmap_index;
2340 rtx pat = PATTERN (insn);
2341 rtx set, first_set, new_insn;
2342 rtx old_reg;
2343 int i;
2344
2345 /* This block matches the logic in hash_scan_insn. */
2346 switch (GET_CODE (pat))
2347 {
2348 case SET:
2349 set = pat;
2350 break;
2351
2352 case PARALLEL:
2353 /* Search through the parallel looking for the set whose
2354 source was the expression that we're interested in. */
2355 first_set = NULL_RTX;
2356 set = NULL_RTX;
2357 for (i = 0; i < XVECLEN (pat, 0); i++)
2358 {
2359 rtx x = XVECEXP (pat, 0, i);
2360 if (GET_CODE (x) == SET)
2361 {
2362 /* If the source was a REG_EQUAL or REG_EQUIV note, we
2363 may not find an equivalent expression, but in this
2364 case the PARALLEL will have a single set. */
2365 if (first_set == NULL_RTX)
2366 first_set = x;
2367 if (expr_equiv_p (SET_SRC (x), expr->expr))
2368 {
2369 set = x;
2370 break;
2371 }
2372 }
2373 }
2374
2375 gcc_assert (first_set);
2376 if (set == NULL_RTX)
2377 set = first_set;
2378 break;
2379
2380 default:
2381 gcc_unreachable ();
2382 }
2383
2384 if (REG_P (SET_DEST (set)))
2385 {
2386 old_reg = SET_DEST (set);
2387 /* Check if we can modify the set destination in the original insn. */
2388 if (validate_change (insn, &SET_DEST (set), reg, 0))
2389 {
2390 new_insn = gen_move_insn (old_reg, reg);
2391 new_insn = emit_insn_after (new_insn, insn);
2392 }
2393 else
2394 {
2395 new_insn = gen_move_insn (reg, old_reg);
2396 new_insn = emit_insn_after (new_insn, insn);
2397 }
2398 }
2399 else /* This is possible only in case of a store to memory. */
2400 {
2401 old_reg = SET_SRC (set);
2402 new_insn = gen_move_insn (reg, old_reg);
2403
2404 /* Check if we can modify the set source in the original insn. */
2405 if (validate_change (insn, &SET_SRC (set), reg, 0))
2406 new_insn = emit_insn_before (new_insn, insn);
2407 else
2408 new_insn = emit_insn_after (new_insn, insn);
2409 }
2410
2411 gcse_create_count++;
2412
2413 if (dump_file)
2414 fprintf (dump_file,
2415 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
2416 BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
2417 INSN_UID (insn), regno);
2418 }
2419
2420 /* Copy available expressions that reach the redundant expression
2421 to `reaching_reg'. */
2422
2423 static void
2424 pre_insert_copies (void)
2425 {
2426 unsigned int i, added_copy;
2427 struct expr *expr;
2428 struct occr *occr;
2429 struct occr *avail;
2430
2431 /* For each available expression in the table, copy the result to
2432 `reaching_reg' if the expression reaches a deleted one.
2433
2434 ??? The current algorithm is rather brute force.
2435 Need to do some profiling. */
2436
2437 for (i = 0; i < expr_hash_table.size; i++)
2438 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2439 {
2440 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
2441 we don't want to insert a copy here because the expression may not
2442 really be redundant. So only insert an insn if the expression was
2443 deleted. This test also avoids further processing if the
2444 expression wasn't deleted anywhere. */
2445 if (expr->reaching_reg == NULL)
2446 continue;
2447
2448 /* Set when we add a copy for that expression. */
2449 added_copy = 0;
2450
2451 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2452 {
2453 if (! occr->deleted_p)
2454 continue;
2455
2456 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
2457 {
2458 rtx insn = avail->insn;
2459
2460 /* No need to handle this one if handled already. */
2461 if (avail->copied_p)
2462 continue;
2463
2464 /* Don't handle this one if it's a redundant one. */
2465 if (INSN_DELETED_P (insn))
2466 continue;
2467
2468 /* Or if the expression doesn't reach the deleted one. */
2469 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
2470 expr,
2471 BLOCK_FOR_INSN (occr->insn)))
2472 continue;
2473
2474 added_copy = 1;
2475
2476 /* Copy the result of avail to reaching_reg. */
2477 pre_insert_copy_insn (expr, insn);
2478 avail->copied_p = 1;
2479 }
2480 }
2481
2482 if (added_copy)
2483 update_ld_motion_stores (expr);
2484 }
2485 }
2486
2487 /* Emit move from SRC to DEST noting the equivalence with expression computed
2488 in INSN. */
2489
2490 static rtx
2491 gcse_emit_move_after (rtx dest, rtx src, rtx insn)
2492 {
2493 rtx new_rtx;
2494 rtx set = single_set (insn), set2;
2495 rtx note;
2496 rtx eqv = NULL_RTX;
2497
2498 /* This should never fail since we're creating a reg->reg copy
2499 we've verified to be valid. */
2500
2501 new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
2502
2503 /* Note the equivalence for local CSE pass. Take the note from the old
2504 set if there was one. Otherwise record the SET_SRC from the old set
2505 unless DEST is also an operand of the SET_SRC. */
2506 set2 = single_set (new_rtx);
2507 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
2508 return new_rtx;
2509 if ((note = find_reg_equal_equiv_note (insn)))
2510 eqv = XEXP (note, 0);
2511 else if (! REG_P (dest)
2512 || ! reg_mentioned_p (dest, SET_SRC (set)))
2513 eqv = SET_SRC (set);
2514
2515 if (eqv != NULL_RTX)
2516 set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
2517
2518 return new_rtx;
2519 }
2520
2521 /* Delete redundant computations.
2522 Deletion is done by changing the insn to copy the `reaching_reg' of
2523 the expression into the result of the SET. It is left to later passes
2524 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
2525
2526 Return nonzero if a change is made. */
2527
2528 static int
2529 pre_delete (void)
2530 {
2531 unsigned int i;
2532 int changed;
2533 struct expr *expr;
2534 struct occr *occr;
2535
2536 changed = 0;
2537 for (i = 0; i < expr_hash_table.size; i++)
2538 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2539 {
2540 int indx = expr->bitmap_index;
2541
2542 /* We only need to search antic_occr since we require ANTLOC != 0. */
2543 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2544 {
2545 rtx insn = occr->insn;
2546 rtx set;
2547 basic_block bb = BLOCK_FOR_INSN (insn);
2548
2549 /* We only delete insns that have a single_set. */
2550 if (bitmap_bit_p (pre_delete_map[bb->index], indx)
2551 && (set = single_set (insn)) != 0
2552 && dbg_cnt (pre_insn))
2553 {
2554 /* Create a pseudo-reg to store the result of reaching
2555 expressions into. Get the mode for the new pseudo from
2556 the mode of the original destination pseudo. */
2557 if (expr->reaching_reg == NULL)
2558 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
2559
2560 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg, insn);
2561 delete_insn (insn);
2562 occr->deleted_p = 1;
2563 changed = 1;
2564 gcse_subst_count++;
2565
2566 if (dump_file)
2567 {
2568 fprintf (dump_file,
2569 "PRE: redundant insn %d (expression %d) in ",
2570 INSN_UID (insn), indx);
2571 fprintf (dump_file, "bb %d, reaching reg is %d\n",
2572 bb->index, REGNO (expr->reaching_reg));
2573 }
2574 }
2575 }
2576 }
2577
2578 return changed;
2579 }
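
/* A minimal source-level picture of delete + insert (editorial
   illustration, names invented):

     if (p)                        if (p)
       x = a + b;                    { t = a + b; x = t; }
     else              ==>         else
       ;                             t = a + b;    <- edge insertion
     y = a + b;                    y = t;          <- was redundant

   pre_delete turns the fully redundant evaluation into a copy from
   the reaching register T; pre_edge_insert supplies T on the path
   where a+b was not previously available; and pre_insert_copies
   rewrites the original occurrence so that it feeds T.  */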
2580
2581 /* Perform GCSE optimizations using PRE.
2582 This is called by one_pre_gcse_pass after all the dataflow analysis
2583 has been done.
2584
2585 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
2586 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
2587 Compiler Design and Implementation.
2588
2589 ??? A new pseudo reg is created to hold the reaching expression. The nice
2590 thing about the classical approach is that it would try to use an existing
2591 reg. If the register can't be adequately optimized [i.e. we introduce
2592 reload problems], one could add a pass here to propagate the new register
2593 through the block.
2594
2595 ??? We don't handle single sets in PARALLELs because we're [currently] not
2596 able to copy the rest of the parallel when we insert copies to create full
2597 redundancies from partial redundancies. However, there's no reason why we
2598 can't handle PARALLELs in the cases where there are no partial
2599 redundancies. */
2600
2601 static int
2602 pre_gcse (struct edge_list *edge_list)
2603 {
2604 unsigned int i;
2605 int did_insert, changed;
2606 struct expr **index_map;
2607 struct expr *expr;
2608
2609 /* Compute a mapping from expression number (`bitmap_index') to
2610 hash table entry. */
2611
2612 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
2613 for (i = 0; i < expr_hash_table.size; i++)
2614 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2615 index_map[expr->bitmap_index] = expr;
2616
2617 /* Delete the redundant insns first so that
2618 - we know what register to use for the new insns and for the other
2619 ones with reaching expressions
2620 - we know which insns are redundant when we go to create copies */
2621
2622 changed = pre_delete ();
2623 did_insert = pre_edge_insert (edge_list, index_map);
2624
2625 /* In other places with reaching expressions, copy the expression to the
2626 specially allocated pseudo-reg that reaches the redundant expr. */
2627 pre_insert_copies ();
2628 if (did_insert)
2629 {
2630 commit_edge_insertions ();
2631 changed = 1;
2632 }
2633
2634 free (index_map);
2635 return changed;
2636 }
2637
2638 /* Top level routine to perform one PRE GCSE pass.
2639
2640 Return nonzero if a change was made. */
2641
2642 static int
2643 one_pre_gcse_pass (void)
2644 {
2645 int changed = 0;
2646
2647 gcse_subst_count = 0;
2648 gcse_create_count = 0;
2649
2650 /* Return if there's nothing to do, or it is too expensive. */
2651 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
2652 || is_too_expensive (_("PRE disabled")))
2653 return 0;
2654
2655 /* We need alias. */
2656 init_alias_analysis ();
2657
2658 bytes_used = 0;
2659 gcc_obstack_init (&gcse_obstack);
2660 alloc_gcse_mem ();
2661
2662 alloc_hash_table (&expr_hash_table);
2663 add_noreturn_fake_exit_edges ();
2664 if (flag_gcse_lm)
2665 compute_ld_motion_mems ();
2666
2667 compute_hash_table (&expr_hash_table);
2668 if (flag_gcse_lm)
2669 trim_ld_motion_mems ();
2670 if (dump_file)
2671 dump_hash_table (dump_file, "Expression", &expr_hash_table);
2672
2673 if (expr_hash_table.n_elems > 0)
2674 {
2675 struct edge_list *edge_list;
2676 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
2677 edge_list = compute_pre_data ();
2678 changed |= pre_gcse (edge_list);
2679 free_edge_list (edge_list);
2680 free_pre_mem ();
2681 }
2682
2683 if (flag_gcse_lm)
2684 free_ld_motion_mems ();
2685 remove_fake_exit_edges ();
2686 free_hash_table (&expr_hash_table);
2687
2688 free_gcse_mem ();
2689 obstack_free (&gcse_obstack, NULL);
2690
2691 /* We are finished with alias. */
2692 end_alias_analysis ();
2693
2694 if (dump_file)
2695 {
2696 fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
2697 current_function_name (), n_basic_blocks, bytes_used);
2698 fprintf (dump_file, "%d substs, %d insns created\n",
2699 gcse_subst_count, gcse_create_count);
2700 }
2701
2702 return changed;
2703 }
2704 \f
2705 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
2706 to INSN. If such notes are added to an insn which references a
2707 CODE_LABEL, the LABEL_NUSES count is incremented. We have to add
2708 that note, because the following loop optimization pass requires
2709 them. */
2710
2711 /* ??? If there was a jump optimization pass after gcse and before loop,
2712 then we would not need to do this here, because jump would add the
2713 necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes. */
2714
2715 static void
2716 add_label_notes (rtx x, rtx insn)
2717 {
2718 enum rtx_code code = GET_CODE (x);
2719 int i, j;
2720 const char *fmt;
2721
2722 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2723 {
2724 /* This code used to ignore labels that referred to dispatch tables to
2725 avoid flow generating (slightly) worse code.
2726
2727 We no longer ignore such label references (see LABEL_REF handling in
2728 mark_jump_label for additional information). */
2729
2730 /* There's no reason for current users to emit jump-insns with
2731 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
2732 notes. */
2733 gcc_assert (!JUMP_P (insn));
2734 add_reg_note (insn, REG_LABEL_OPERAND, XEXP (x, 0));
2735
2736 if (LABEL_P (XEXP (x, 0)))
2737 LABEL_NUSES (XEXP (x, 0))++;
2738
2739 return;
2740 }
2741
2742 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2743 {
2744 if (fmt[i] == 'e')
2745 add_label_notes (XEXP (x, i), insn);
2746 else if (fmt[i] == 'E')
2747 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2748 add_label_notes (XVECEXP (x, i, j), insn);
2749 }
2750 }
2751
2752 /* Code Hoisting variables and subroutines. */
2753
2754 /* Very busy expressions. */
2755 static sbitmap *hoist_vbein;
2756 static sbitmap *hoist_vbeout;
2757
2758 /* ??? We could compute post dominators and run this algorithm in
2759 reverse to perform tail merging; doing so would probably be
2760 more effective than the tail merging code in jump.c.
2761
2762 It's unclear if tail merging could be run in parallel with
2763 code hoisting. It would be nice. */
2764
2765 /* Allocate vars used for code hoisting analysis. */
2766
2767 static void
2768 alloc_code_hoist_mem (int n_blocks, int n_exprs)
2769 {
2770 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
2771 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
2772 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
2773
2774 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
2775 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
2776 }
2777
2778 /* Free vars used for code hoisting analysis. */
2779
2780 static void
2781 free_code_hoist_mem (void)
2782 {
2783 sbitmap_vector_free (antloc);
2784 sbitmap_vector_free (transp);
2785 sbitmap_vector_free (comp);
2786
2787 sbitmap_vector_free (hoist_vbein);
2788 sbitmap_vector_free (hoist_vbeout);
2789
2790 free_dominance_info (CDI_DOMINATORS);
2791 }
2792
2793 /* Compute the very busy expressions at entry/exit from each block.
2794
2795 An expression is very busy if all paths from a given point
2796 compute the expression. */
2797
2798 static void
2799 compute_code_hoist_vbeinout (void)
2800 {
2801 int changed, passes;
2802 basic_block bb;
2803
2804 bitmap_vector_clear (hoist_vbeout, last_basic_block);
2805 bitmap_vector_clear (hoist_vbein, last_basic_block);
2806
2807 passes = 0;
2808 changed = 1;
2809
2810 while (changed)
2811 {
2812 changed = 0;
2813
2814 /* We scan the blocks in reverse order to speed up
2815 the convergence. */
2816 FOR_EACH_BB_REVERSE (bb)
2817 {
2818 if (bb->next_bb != EXIT_BLOCK_PTR)
2819 {
2820 bitmap_intersection_of_succs (hoist_vbeout[bb->index],
2821 hoist_vbein, bb);
2822
2823 /* Include expressions in VBEout that are calculated
2824 in BB and available at its end. */
2825 bitmap_ior (hoist_vbeout[bb->index],
2826 hoist_vbeout[bb->index], comp[bb->index]);
2827 }
2828
2829 changed |= bitmap_or_and (hoist_vbein[bb->index],
2830 antloc[bb->index],
2831 hoist_vbeout[bb->index],
2832 transp[bb->index]);
2833 }
2834
2835 passes++;
2836 }
2837
2838 if (dump_file)
2839 {
2840 fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
2841
2842 FOR_EACH_BB (bb)
2843 {
2844 fprintf (dump_file, "vbein (%d): ", bb->index);
2845 dump_bitmap_file (dump_file, hoist_vbein[bb->index]);
2846 fprintf (dump_file, "vbeout(%d): ", bb->index);
2847 dump_bitmap_file (dump_file, hoist_vbeout[bb->index]);
2848 }
2849 }
2850 }
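
/* In equation form, the fixed point computed above is

     VBEout[b] = (intersection over successors S of VBEin[S]) | COMP[b]
     VBEin[b]  = ANTLOC[b] | (VBEout[b] & TRANSP[b])

   (with VBEout left empty for the block laid out just before EXIT);
   i.e. an expression is very busy at the exit of B if B computes it
   and it is available at B's end, or every successor path evaluates
   it, and very busy at entry if B computes it locally or it is very
   busy at exit and B does not modify its operands.  */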
2851
2852 /* Top level routine to do the dataflow analysis needed by code hoisting. */
2853
2854 static void
2855 compute_code_hoist_data (void)
2856 {
2857 compute_local_properties (transp, comp, antloc, &expr_hash_table);
2858 prune_expressions (false);
2859 compute_code_hoist_vbeinout ();
2860 calculate_dominance_info (CDI_DOMINATORS);
2861 if (dump_file)
2862 fprintf (dump_file, "\n");
2863 }
2864
2865 /* Update register pressure for BB when hoisting an expression from
2866 instruction FROM, if live ranges of inputs are shrunk. Also
2867 maintain live_in information if the live range of a register
2868 referred to in FROM is shrunk.
2869
2870 Return 0 if register pressure doesn't change, otherwise return
2871 the number by which register pressure is decreased.
2872
2873 NOTE: Register pressure won't be increased in this function. */
2874
2875 static int
2876 update_bb_reg_pressure (basic_block bb, rtx from)
2877 {
2878 rtx dreg, insn;
2879 basic_block succ_bb;
2880 df_ref *op, op_ref;
2881 edge succ;
2882 edge_iterator ei;
2883 int decreased_pressure = 0;
2884 int nregs;
2885 enum reg_class pressure_class;
2886
2887 for (op = DF_INSN_USES (from); *op; op++)
2888 {
2889 dreg = DF_REF_REAL_REG (*op);
2890 /* The live range of the register is shrunk only if it isn't:
2891 1. referred to on any path from the end of this block to EXIT, or
2892 2. referred to by insns other than FROM in this block. */
2893 FOR_EACH_EDGE (succ, ei, bb->succs)
2894 {
2895 succ_bb = succ->dest;
2896 if (succ_bb == EXIT_BLOCK_PTR)
2897 continue;
2898
2899 if (bitmap_bit_p (BB_DATA (succ_bb)->live_in, REGNO (dreg)))
2900 break;
2901 }
2902 if (succ != NULL)
2903 continue;
2904
2905 op_ref = DF_REG_USE_CHAIN (REGNO (dreg));
2906 for (; op_ref; op_ref = DF_REF_NEXT_REG (op_ref))
2907 {
2908 if (!DF_REF_INSN_INFO (op_ref))
2909 continue;
2910
2911 insn = DF_REF_INSN (op_ref);
2912 if (BLOCK_FOR_INSN (insn) == bb
2913 && NONDEBUG_INSN_P (insn) && insn != from)
2914 break;
2915 }
2916
2917 pressure_class = get_regno_pressure_class (REGNO (dreg), &nregs);
2918 /* Decrease register pressure and update live_in information for
2919 this block. */
2920 if (!op_ref && pressure_class != NO_REGS)
2921 {
2922 decreased_pressure += nregs;
2923 BB_DATA (bb)->max_reg_pressure[pressure_class] -= nregs;
2924 bitmap_clear_bit (BB_DATA (bb)->live_in, REGNO (dreg));
2925 }
2926 }
2927 return decreased_pressure;
2928 }
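
/* Example with invented numbers: let FROM be "t = x + y" where this
   block contains no other non-debug use of X, and X is not in the
   live_in set of any non-EXIT successor.  Hoisting FROM away then
   ends X's live range in this block; for a pressure class where X
   needs one hard register, the function returns 1 and the block's
   recorded maximum pressure for that class drops by one.  */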
2929
2930 /* Determine if the expression EXPR should be hoisted to EXPR_BB up in
2931 the flow graph, if it can reach BB unimpaired. Stop the search if the
2932 expression would need to be moved more than DISTANCE instructions.
2933
2934 DISTANCE is the number of instructions through which EXPR can be
2935 hoisted up in flow graph.
2936
2937 BB_SIZE points to an array which contains the number of instructions
2938 for each basic block.
2939
2940 PRESSURE_CLASS and NREGS are register class and number of hard registers
2941 for storing EXPR.
2942
2943 HOISTED_BBS points to a bitmap indicating basic blocks through which
2944 EXPR is hoisted.
2945
2946 FROM is the instruction from which EXPR is hoisted.
2947
2948 It's unclear exactly what Muchnick meant by "unimpaired". It seems
2949 to me that the expression must either be computed or transparent in
2950 *every* block in the path(s) from EXPR_BB to BB. Any other definition
2951 would allow the expression to be hoisted out of loops, even if
2952 the expression wasn't a loop invariant.
2953
2954 Contrast this to reachability for PRE where an expression is
2955 considered reachable if *any* path reaches it instead of *all*
2956 paths. */
2957
2958 static int
2959 should_hoist_expr_to_dom (basic_block expr_bb, struct expr *expr,
2960 basic_block bb, sbitmap visited, int distance,
2961 int *bb_size, enum reg_class pressure_class,
2962 int *nregs, bitmap hoisted_bbs, rtx from)
2963 {
2964 unsigned int i;
2965 edge pred;
2966 edge_iterator ei;
2967 sbitmap_iterator sbi;
2968 int visited_allocated_locally = 0;
2969 int decreased_pressure = 0;
2970
2971 if (flag_ira_hoist_pressure)
2972 {
2973 /* Record old information of basic block BB when it is visited
2974 for the first time. */
2975 if (!bitmap_bit_p (hoisted_bbs, bb->index))
2976 {
2977 struct bb_data *data = BB_DATA (bb);
2978 bitmap_copy (data->backup, data->live_in);
2979 data->old_pressure = data->max_reg_pressure[pressure_class];
2980 }
2981 decreased_pressure = update_bb_reg_pressure (bb, from);
2982 }
2983 /* Terminate the search if distance, for which EXPR is allowed to move,
2984 is exhausted. */
2985 if (distance > 0)
2986 {
2987 if (flag_ira_hoist_pressure)
2988 {
2989 /* Prefer to hoist EXPR if register pressure is decreased. */
2990 if (decreased_pressure > *nregs)
2991 distance += bb_size[bb->index];
2992 /* Let EXPR be hoisted through the basic block at no cost if one
2993 of the following conditions is satisfied:
2994
2995 1. The basic block has low register pressure.
2996 2. Register pressure won't be increased after hoisting EXPR.
2997
2998 Constant expressions are handled conservatively, because
2999 hoisting constant expressions aggressively results in worse
3000 code. This decision is based on observations from CSiBE
3001 on the ARM target, while it has no obvious effect on other
3002 targets like x86, x86_64, mips and powerpc. */
3003 else if (CONST_INT_P (expr->expr)
3004 || (BB_DATA (bb)->max_reg_pressure[pressure_class]
3005 >= ira_class_hard_regs_num[pressure_class]
3006 && decreased_pressure < *nregs))
3007 distance -= bb_size[bb->index];
3008 }
3009 else
3010 distance -= bb_size[bb->index];
3011
3012 if (distance <= 0)
3013 return 0;
3014 }
3015 else
3016 gcc_assert (distance == 0);
3017
3018 if (visited == NULL)
3019 {
3020 visited_allocated_locally = 1;
3021 visited = sbitmap_alloc (last_basic_block);
3022 bitmap_clear (visited);
3023 }
3024
3025 FOR_EACH_EDGE (pred, ei, bb->preds)
3026 {
3027 basic_block pred_bb = pred->src;
3028
3029 if (pred->src == ENTRY_BLOCK_PTR)
3030 break;
3031 else if (pred_bb == expr_bb)
3032 continue;
3033 else if (bitmap_bit_p (visited, pred_bb->index))
3034 continue;
3035 else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
3036 break;
3037 /* Not killed. */
3038 else
3039 {
3040 bitmap_set_bit (visited, pred_bb->index);
3041 if (! should_hoist_expr_to_dom (expr_bb, expr, pred_bb,
3042 visited, distance, bb_size,
3043 pressure_class, nregs,
3044 hoisted_bbs, from))
3045 break;
3046 }
3047 }
3048 if (visited_allocated_locally)
3049 {
3050 /* If EXPR can be hoisted to expr_bb, record basic blocks through
3051 which EXPR is hoisted in hoisted_bbs. */
3052 if (flag_ira_hoist_pressure && !pred)
3053 {
3054 /* Record the basic block from which EXPR is hoisted. */
3055 bitmap_set_bit (visited, bb->index);
3056 EXECUTE_IF_SET_IN_BITMAP (visited, 0, i, sbi)
3057 bitmap_set_bit (hoisted_bbs, i);
3058 }
3059 sbitmap_free (visited);
3060 }
3061
3062 return (pred == NULL);
3063 }
3064 \f
3065 /* Find occurrence in BB. */
3066
3067 static struct occr *
3068 find_occr_in_bb (struct occr *occr, basic_block bb)
3069 {
3070 /* Find the right occurrence of this expression. */
3071 while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
3072 occr = occr->next;
3073
3074 return occr;
3075 }
3076
3077 /* Actually perform code hoisting.
3078
3079 The code hoisting pass can hoist multiple computations of the same
3080 expression along dominated paths to a dominating basic block, like
3081 from b2/b3 to b1 as depicted below:
3082
3083 b1 ------
3084 /\ |
3085 / \ |
3086 bx by distance
3087 / \ |
3088 / \ |
3089 b2 b3 ------
3090
3091 Unfortunately code hoisting generally extends the live range of an
3092 output pseudo register, which increases register pressure and hurts
3093 register allocation. To address this issue, an attribute MAX_DISTANCE
3094 is computed and attached to each expression. The attribute is computed
3095 from the rtx cost of the corresponding expression and is used to control
3096 how far the expression can be hoisted up the flow graph. As the
3097 expression is hoisted up the flow graph, GCC decreases its DISTANCE
3098 and stops the hoist if DISTANCE reaches 0. Code hoisting can decrease
3099 register pressure if live ranges of inputs are shrunk.
3100
3101 Option "-fira-hoist-pressure" implements register-pressure-directed
3102 hoisting based on the above method. The rationale is:
3103 1. Calculate register pressure for each basic block by reusing IRA
3104 facility.
3105 2. When expression is hoisted through one basic block, GCC checks
3106 the change of live ranges for inputs/output. The basic block's
3107 register pressure will be increased because of the extended live
3108 range of the output. However, register pressure will be decreased
3109 if the live ranges of inputs are shrunk.
3110 3. After knowing how hoisting affects register pressure, GCC prefers
3111 to hoist the expression if it can decrease register pressure, by
3112 increasing DISTANCE of the corresponding expression.
3113 4. If hoisting the expression increases register pressure, GCC checks
3114 the register pressure of the basic block and decreases DISTANCE only if
3115 the pressure is high. In other words, the expression will be
3116 hoisted through at no cost if the basic block has low register
3117 pressure.
3118 5. Update register pressure information for basic blocks through
3119 which expression is hoisted. */
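
/* A small source-level illustration of the above (sketch only,
   invented fragment):

     if (p)                        t = a * b;   <- hoisted into b1
       x = a * b;                  if (p)
     else              ==>           x = t;
       y = a * b;                  else
                                     y = t;

   a*b is very busy at the exit of the dominating block and locally
   anticipatable in both dominated arms, so it is evaluated once
   before the branch, provided the move fits in the expression's
   MAX_DISTANCE budget and, with -fira-hoist-pressure, does not push
   register pressure too high.  */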
3120
3121 static int
3122 hoist_code (void)
3123 {
3124 basic_block bb, dominated;
3125 vec<basic_block> dom_tree_walk;
3126 unsigned int dom_tree_walk_index;
3127 vec<basic_block> domby;
3128 unsigned int i, j, k;
3129 struct expr **index_map;
3130 struct expr *expr;
3131 int *to_bb_head;
3132 int *bb_size;
3133 int changed = 0;
3134 struct bb_data *data;
3135 /* Basic blocks that have occurrences reachable from BB. */
3136 bitmap from_bbs;
3137 /* Basic blocks through which expr is hoisted. */
3138 bitmap hoisted_bbs = NULL;
3139 bitmap_iterator bi;
3140
3141 /* Compute a mapping from expression number (`bitmap_index') to
3142 hash table entry. */
3143
3144 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
3145 for (i = 0; i < expr_hash_table.size; i++)
3146 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
3147 index_map[expr->bitmap_index] = expr;
3148
3149 /* Calculate sizes of basic blocks and note how far
3150 each instruction is from the start of its block. We then use this
3151 data to restrict the distance an expression can travel. */
3152
3153 to_bb_head = XCNEWVEC (int, get_max_uid ());
3154 bb_size = XCNEWVEC (int, last_basic_block);
3155
3156 FOR_EACH_BB (bb)
3157 {
3158 rtx insn;
3159 int to_head;
3160
3161 to_head = 0;
3162 FOR_BB_INSNS (bb, insn)
3163 {
3164 /* Don't count debug instructions to avoid them affecting
3165 decision choices. */
3166 if (NONDEBUG_INSN_P (insn))
3167 to_bb_head[INSN_UID (insn)] = to_head++;
3168 }
3169
3170 bb_size[bb->index] = to_head;
3171 }
3172
3173 gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR->succs) == 1
3174 && (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest
3175 == ENTRY_BLOCK_PTR->next_bb));
3176
3177 from_bbs = BITMAP_ALLOC (NULL);
3178 if (flag_ira_hoist_pressure)
3179 hoisted_bbs = BITMAP_ALLOC (NULL);
3180
3181 dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
3182 ENTRY_BLOCK_PTR->next_bb);
3183
3184 /* Walk over each basic block looking for potentially hoistable
3185 expressions, nothing gets hoisted from the entry block. */
3186 FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
3187 {
3188 domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);
3189
3190 if (domby.length () == 0)
3191 continue;
3192
3193 /* Examine each expression that is very busy at the exit of this
3194 block. These are the potentially hoistable expressions. */
3195 for (i = 0; i < SBITMAP_SIZE (hoist_vbeout[bb->index]); i++)
3196 {
3197 if (bitmap_bit_p (hoist_vbeout[bb->index], i))
3198 {
3199 int nregs = 0;
3200 enum reg_class pressure_class = NO_REGS;
3201 /* Current expression. */
3202 struct expr *expr = index_map[i];
3203 /* Number of occurrences of EXPR that can be hoisted to BB. */
3204 int hoistable = 0;
3205 /* Occurrences reachable from BB. */
3206 vec<occr_t> occrs_to_hoist = vNULL;
3207 /* We want to insert the expression into BB only once, so
3208 note when we've inserted it. */
3209 int insn_inserted_p;
3210 occr_t occr;
3211
3212 /* If an expression is computed in BB and is available at end of
3213 BB, hoist all occurrences dominated by BB to BB. */
3214 if (bitmap_bit_p (comp[bb->index], i))
3215 {
3216 occr = find_occr_in_bb (expr->antic_occr, bb);
3217
3218 if (occr)
3219 {
3220 /* An occurrence might've been already deleted
3221 while processing a dominator of BB. */
3222 if (!occr->deleted_p)
3223 {
3224 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3225 hoistable++;
3226 }
3227 }
3228 else
3229 hoistable++;
3230 }
3231
3232 /* We've found a potentially hoistable expression, now
3233 we look at every block BB dominates to see if it
3234 computes the expression. */
3235 FOR_EACH_VEC_ELT (domby, j, dominated)
3236 {
3237 int max_distance;
3238
3239 /* Ignore self dominance. */
3240 if (bb == dominated)
3241 continue;
3242 /* We've found a dominated block, now see if it computes
3243 the busy expression and whether or not moving that
3244 expression to the "beginning" of that block is safe. */
3245 if (!bitmap_bit_p (antloc[dominated->index], i))
3246 continue;
3247
3248 occr = find_occr_in_bb (expr->antic_occr, dominated);
3249 gcc_assert (occr);
3250
3251 /* An occurrence might've been already deleted
3252 while processing a dominator of BB. */
3253 if (occr->deleted_p)
3254 continue;
3255 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3256
3257 max_distance = expr->max_distance;
3258 if (max_distance > 0)
3259 /* Adjust MAX_DISTANCE to account for the fact that
3260 OCCR won't have to travel all of DOMINATED, but
3261 only part of it. */
3262 max_distance += (bb_size[dominated->index]
3263 - to_bb_head[INSN_UID (occr->insn)]);
3264
3265 pressure_class = get_pressure_class_and_nregs (occr->insn,
3266 &nregs);
3267
3268 /* Note if the expression should be hoisted from the dominated
3269 block to BB if it can reach DOMINATED unimpaired.
3270
3271 Keep track of how many times this expression is hoistable
3272 from a dominated block into BB. */
3273 if (should_hoist_expr_to_dom (bb, expr, dominated, NULL,
3274 max_distance, bb_size,
3275 pressure_class, &nregs,
3276 hoisted_bbs, occr->insn))
3277 {
3278 hoistable++;
3279 occrs_to_hoist.safe_push (occr);
3280 bitmap_set_bit (from_bbs, dominated->index);
3281 }
3282 }
3283
3284 /* If we found more than one hoistable occurrence of this
3285 expression, then note it in the vector of expressions to
3286 hoist. It makes no sense to hoist things which are computed
3287 in only one BB, and doing so tends to pessimize register
3288 allocation. One could increase this value to try harder
3289 to avoid any possible code expansion due to register
3290 allocation issues; however experiments have shown that
3291 the vast majority of hoistable expressions are only movable
3292 from two successors, so raising this threshold is likely
3293 to nullify any benefit we get from code hoisting. */
3294 if (hoistable > 1 && dbg_cnt (hoist_insn))
3295 {
3296 /* If (hoistable != vec::length), then there is
3297 an occurrence of EXPR in BB itself. Don't waste
3298 time looking for LCA in this case. */
3299 if ((unsigned) hoistable == occrs_to_hoist.length ())
3300 {
3301 basic_block lca;
3302
3303 lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
3304 from_bbs);
3305 if (lca != bb)
3306 /* Punt, it's better to hoist these occurrences to
3307 LCA. */
3308 occrs_to_hoist.release ();
3309 }
3310 }
3311 else
3312 /* Punt, no point hoisting a single occurrence. */
3313 occrs_to_hoist.release ();
3314
3315 if (flag_ira_hoist_pressure
3316 && !occrs_to_hoist.is_empty ())
3317 {
3318 /* Increase register pressure of basic blocks to which
3319 expr is hoisted because of extended live range of
3320 output. */
3321 data = BB_DATA (bb);
3322 data->max_reg_pressure[pressure_class] += nregs;
3323 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3324 {
3325 data = BB_DATA (BASIC_BLOCK (k));
3326 data->max_reg_pressure[pressure_class] += nregs;
3327 }
3328 }
3329 else if (flag_ira_hoist_pressure)
3330 {
3331 /* Restore register pressure and live_in info for basic
3332 blocks recorded in hoisted_bbs when expr will not be
3333 hoisted. */
3334 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3335 {
3336 data = BB_DATA (BASIC_BLOCK (k));
3337 bitmap_copy (data->live_in, data->backup);
3338 data->max_reg_pressure[pressure_class]
3339 = data->old_pressure;
3340 }
3341 }
3342
3343 if (flag_ira_hoist_pressure)
3344 bitmap_clear (hoisted_bbs);
3345
3346 insn_inserted_p = 0;
3347
3348 /* Walk through occurrences of the I'th expression we want
3349 to hoist to BB and make the transformations. */
3350 FOR_EACH_VEC_ELT (occrs_to_hoist, j, occr)
3351 {
3352 rtx insn;
3353 rtx set;
3354
3355 gcc_assert (!occr->deleted_p);
3356
3357 insn = occr->insn;
3358 set = single_set (insn);
3359 gcc_assert (set);
3360
3361 /* Create a pseudo-reg to store the result of reaching
3362 expressions into. Get the mode for the new pseudo
3363 from the mode of the original destination pseudo.
3364
3365 It is important to use new pseudos whenever we
3366 emit a set. This will allow reload to use
3367 rematerialization for such registers. */
3368 if (!insn_inserted_p)
3369 expr->reaching_reg
3370 = gen_reg_rtx_and_attrs (SET_DEST (set));
3371
3372 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg,
3373 insn);
3374 delete_insn (insn);
3375 occr->deleted_p = 1;
3376 changed = 1;
3377 gcse_subst_count++;
3378
3379 if (!insn_inserted_p)
3380 {
3381 insert_insn_end_basic_block (expr, bb);
3382 insn_inserted_p = 1;
3383 }
3384 }
3385
3386 occrs_to_hoist.release ();
3387 bitmap_clear (from_bbs);
3388 }
3389 }
3390 domby.release ();
3391 }
3392
3393 dom_tree_walk.release ();
3394 BITMAP_FREE (from_bbs);
3395 if (flag_ira_hoist_pressure)
3396 BITMAP_FREE (hoisted_bbs);
3397
3398 free (bb_size);
3399 free (to_bb_head);
3400 free (index_map);
3401
3402 return changed;
3403 }
3404
3405 /* Return pressure class and number of needed hard registers (through
3406 *NREGS) of register REGNO. */
3407 static enum reg_class
3408 get_regno_pressure_class (int regno, int *nregs)
3409 {
3410 if (regno >= FIRST_PSEUDO_REGISTER)
3411 {
3412 enum reg_class pressure_class;
3413
3414 pressure_class = reg_allocno_class (regno);
3415 pressure_class = ira_pressure_class_translate[pressure_class];
3416 *nregs
3417 = ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
3418 return pressure_class;
3419 }
3420 else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
3421 && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
3422 {
3423 *nregs = 1;
3424 return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
3425 }
3426 else
3427 {
3428 *nregs = 0;
3429 return NO_REGS;
3430 }
3431 }
3432
3433 /* Return pressure class and number of hard registers (through *NREGS)
3434 for destination of INSN. */
3435 static enum reg_class
3436 get_pressure_class_and_nregs (rtx insn, int *nregs)
3437 {
3438 rtx reg;
3439 enum reg_class pressure_class;
3440 rtx set = single_set (insn);
3441
3442 /* Considered invariant insns have only one set. */
3443 gcc_assert (set != NULL_RTX);
3444 reg = SET_DEST (set);
3445 if (GET_CODE (reg) == SUBREG)
3446 reg = SUBREG_REG (reg);
3447 if (MEM_P (reg))
3448 {
3449 *nregs = 0;
3450 pressure_class = NO_REGS;
3451 }
3452 else
3453 {
3454 gcc_assert (REG_P (reg));
3455 pressure_class = reg_allocno_class (REGNO (reg));
3456 pressure_class = ira_pressure_class_translate[pressure_class];
3457 *nregs
3458 = ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
3459 }
3460 return pressure_class;
3461 }
3462
3463 /* Increase (if INCR_P) or decrease current register pressure for
3464 register REGNO. */
3465 static void
3466 change_pressure (int regno, bool incr_p)
3467 {
3468 int nregs;
3469 enum reg_class pressure_class;
3470
3471 pressure_class = get_regno_pressure_class (regno, &nregs);
3472 if (! incr_p)
3473 curr_reg_pressure[pressure_class] -= nregs;
3474 else
3475 {
3476 curr_reg_pressure[pressure_class] += nregs;
3477 if (BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3478 < curr_reg_pressure[pressure_class])
3479 BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3480 = curr_reg_pressure[pressure_class];
3481 }
3482 }
3483
3484 /* Calculate register pressure for each basic block by walking insns
3485 from last to first. */
3486 static void
3487 calculate_bb_reg_pressure (void)
3488 {
3489 int i;
3490 unsigned int j;
3491 rtx insn;
3492 basic_block bb;
3493 bitmap curr_regs_live;
3494 bitmap_iterator bi;
3495
3496
3497 ira_setup_eliminable_regset (false);
3498 curr_regs_live = BITMAP_ALLOC (&reg_obstack);
3499 FOR_EACH_BB (bb)
3500 {
3501 curr_bb = bb;
3502 BB_DATA (bb)->live_in = BITMAP_ALLOC (NULL);
3503 BB_DATA (bb)->backup = BITMAP_ALLOC (NULL);
3504 bitmap_copy (BB_DATA (bb)->live_in, df_get_live_in (bb));
3505 bitmap_copy (curr_regs_live, df_get_live_out (bb));
3506 for (i = 0; i < ira_pressure_classes_num; i++)
3507 curr_reg_pressure[ira_pressure_classes[i]] = 0;
3508 EXECUTE_IF_SET_IN_BITMAP (curr_regs_live, 0, j, bi)
3509 change_pressure (j, true);
3510
3511 FOR_BB_INSNS_REVERSE (bb, insn)
3512 {
3513 rtx dreg;
3514 int regno;
3515 df_ref *def_rec, *use_rec;
3516
3517 if (! NONDEBUG_INSN_P (insn))
3518 continue;
3519
3520 for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
3521 {
3522 dreg = DF_REF_REAL_REG (*def_rec);
3523 gcc_assert (REG_P (dreg));
3524 regno = REGNO (dreg);
3525 if (!(DF_REF_FLAGS (*def_rec)
3526 & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
3527 {
3528 if (bitmap_clear_bit (curr_regs_live, regno))
3529 change_pressure (regno, false);
3530 }
3531 }
3532
3533 for (use_rec = DF_INSN_USES (insn); *use_rec; use_rec++)
3534 {
3535 dreg = DF_REF_REAL_REG (*use_rec);
3536 gcc_assert (REG_P (dreg));
3537 regno = REGNO (dreg);
3538 if (bitmap_set_bit (curr_regs_live, regno))
3539 change_pressure (regno, true);
3540 }
3541 }
3542 }
3543 BITMAP_FREE (curr_regs_live);
3544
3545 if (dump_file == NULL)
3546 return;
3547
3548 fprintf (dump_file, "\nRegister Pressure: \n");
3549 FOR_EACH_BB (bb)
3550 {
3551 fprintf (dump_file, " Basic block %d: \n", bb->index);
3552 for (i = 0; i < ira_pressure_classes_num; i++)
3553 {
3554 enum reg_class pressure_class;
3555
3556 pressure_class = ira_pressure_classes[i];
3557 if (BB_DATA (bb)->max_reg_pressure[pressure_class] == 0)
3558 continue;
3559
3560 fprintf (dump_file, " %s=%d\n", reg_class_names[pressure_class],
3561 BB_DATA (bb)->max_reg_pressure[pressure_class]);
3562 }
3563 }
3564 fprintf (dump_file, "\n");
3565 }
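
/* The walk above is the standard backward liveness recurrence.  As a
   rough sketch (pseudo-code; the DF_REF_PARTIAL/DF_REF_CONDITIONAL
   special case is ignored):

     live = live_out (bb)
     for each nondebug insn from BB_END (bb) back to BB_HEAD (bb):
       live -= full defs of insn     (a def ends a live range)
       live += uses of insn          (a use extends one upwards)
       record the per-class maxima of the pressure of live

   The recorded per-block maxima feed the register-pressure-aware
   hoisting enabled by -fira-hoist-pressure.  */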
3566
3567 /* Top level routine to perform one code hoisting (aka unification) pass.
3568
3569 Return nonzero if a change was made. */
3570
3571 static int
3572 one_code_hoisting_pass (void)
3573 {
3574 int changed = 0;
3575
3576 gcse_subst_count = 0;
3577 gcse_create_count = 0;
3578
3579 /* Return if there's nothing to do, or it is too expensive. */
3580 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
3581 || is_too_expensive (_("GCSE disabled")))
3582 return 0;
3583
3584 doing_code_hoisting_p = true;
3585
3586 /* Calculate register pressure for each basic block. */
3587 if (flag_ira_hoist_pressure)
3588 {
3589 regstat_init_n_sets_and_refs ();
3590 ira_set_pseudo_classes (false, dump_file);
3591 alloc_aux_for_blocks (sizeof (struct bb_data));
3592 calculate_bb_reg_pressure ();
3593 regstat_free_n_sets_and_refs ();
3594 }
3595
3596 /* We need alias analysis. */
3597 init_alias_analysis ();
3598
3599 bytes_used = 0;
3600 gcc_obstack_init (&gcse_obstack);
3601 alloc_gcse_mem ();
3602
3603 alloc_hash_table (&expr_hash_table);
3604 compute_hash_table (&expr_hash_table);
3605 if (dump_file)
3606 dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);
3607
3608 if (expr_hash_table.n_elems > 0)
3609 {
3610 alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
3611 compute_code_hoist_data ();
3612 changed = hoist_code ();
3613 free_code_hoist_mem ();
3614 }
3615
3616 if (flag_ira_hoist_pressure)
3617 {
3618 free_aux_for_blocks ();
3619 free_reg_info ();
3620 }
3621 free_hash_table (&expr_hash_table);
3622 free_gcse_mem ();
3623 obstack_free (&gcse_obstack, NULL);
3624
3625 /* We are finished with alias analysis. */
3626 end_alias_analysis ();
3627
3628 if (dump_file)
3629 {
3630 fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
3631 current_function_name (), n_basic_blocks, bytes_used);
3632 fprintf (dump_file, "%d substs, %d insns created\n",
3633 gcse_subst_count, gcse_create_count);
3634 }
3635
3636 doing_code_hoisting_p = false;
3637
3638 return changed;
3639 }
3640 \f
3641 /* Here we provide the things required to do store motion towards the exit.
3642 In order for this to be effective, gcse also needed to be taught how to
3643 move a load when it is killed only by a store to itself.
3644
3645 int i;
3646 float a[10];
3647
3648 void foo(float scale)
3649 {
3650 for (i=0; i<10; i++)
3651 a[i] *= scale;
3652 }
3653
3654 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
3655 the load out since it's live around the loop, and stored at the bottom
3656 of the loop.
3657
3658 The 'Load Motion' referred to and implemented in this file is
3659 an enhancement to gcse which, when using edge-based LCM, recognizes
3660 this situation and allows gcse to move the load out of the loop.
3661
3662 Once gcse has hoisted the load, store motion can then push the
3663 corresponding store towards the exit, and we end up with no loads
3664 or stores of 'i' in the loop. */
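
/* To illustrate the net effect (a sketch, not actual compiler output),
   the example above ends up roughly as if the source had been:

     void foo (float scale)
     {
       int i_tmp;
       for (i_tmp = 0; i_tmp < 10; i_tmp++)
         a[i_tmp] *= scale;
       i = i_tmp;
     }

   Here i_tmp stands for the hypothetical reaching register: the loads
   and stores of 'i' are gone from the loop body, and a single store
   writes the final value back at the exit.  */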
3665
3666 static hashval_t
3667 pre_ldst_expr_hash (const void *p)
3668 {
3669 int do_not_record_p = 0;
3670 const struct ls_expr *const x = (const struct ls_expr *) p;
3671 return
3672 hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
3673 }
3674
3675 static int
3676 pre_ldst_expr_eq (const void *p1, const void *p2)
3677 {
3678 const struct ls_expr *const ptr1 = (const struct ls_expr *) p1,
3679 *const ptr2 = (const struct ls_expr *) p2;
3680 return expr_equiv_p (ptr1->pattern, ptr2->pattern);
3681 }
3682
3683 /* This will search the ldst list for a matching expression. If it
3684 doesn't find one, we create one and initialize it. */
3685
3686 static struct ls_expr *
3687 ldst_entry (rtx x)
3688 {
3689 int do_not_record_p = 0;
3690 struct ls_expr * ptr;
3691 unsigned int hash;
3692 void **slot;
3693 struct ls_expr e;
3694
3695 hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
3696 NULL, /*have_reg_qty=*/false);
3697
3698 e.pattern = x;
3699 slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
3700 if (*slot)
3701 return (struct ls_expr *)*slot;
3702
3703 ptr = XNEW (struct ls_expr);
3704
3705 ptr->next = pre_ldst_mems;
3706 ptr->expr = NULL;
3707 ptr->pattern = x;
3708 ptr->pattern_regs = NULL_RTX;
3709 ptr->loads = NULL_RTX;
3710 ptr->stores = NULL_RTX;
3711 ptr->reaching_reg = NULL_RTX;
3712 ptr->invalid = 0;
3713 ptr->index = 0;
3714 ptr->hash_index = hash;
3715 pre_ldst_mems = ptr;
3716 *slot = ptr;
3717
3718 return ptr;
3719 }
3720
3721 /* Free up an individual ldst entry. */
3722
3723 static void
3724 free_ldst_entry (struct ls_expr * ptr)
3725 {
3726 free_INSN_LIST_list (& ptr->loads);
3727 free_INSN_LIST_list (& ptr->stores);
3728
3729 free (ptr);
3730 }
3731
3732 /* Free up all memory associated with the ldst list. */
3733
3734 static void
3735 free_ld_motion_mems (void)
3736 {
3737 if (pre_ldst_table)
3738 htab_delete (pre_ldst_table);
3739 pre_ldst_table = NULL;
3740
3741 while (pre_ldst_mems)
3742 {
3743 struct ls_expr * tmp = pre_ldst_mems;
3744
3745 pre_ldst_mems = pre_ldst_mems->next;
3746
3747 free_ldst_entry (tmp);
3748 }
3749
3750 pre_ldst_mems = NULL;
3751 }
3752
3753 /* Dump debugging info about the ldst list. */
3754
3755 static void
3756 print_ldst_list (FILE * file)
3757 {
3758 struct ls_expr * ptr;
3759
3760 fprintf (file, "LDST list: \n");
3761
3762 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
3763 {
3764 fprintf (file, " Pattern (%3d): ", ptr->index);
3765
3766 print_rtl (file, ptr->pattern);
3767
3768 fprintf (file, "\n Loads : ");
3769
3770 if (ptr->loads)
3771 print_rtl (file, ptr->loads);
3772 else
3773 fprintf (file, "(nil)");
3774
3775 fprintf (file, "\n Stores : ");
3776
3777 if (ptr->stores)
3778 print_rtl (file, ptr->stores);
3779 else
3780 fprintf (file, "(nil)");
3781
3782 fprintf (file, "\n\n");
3783 }
3784
3785 fprintf (file, "\n");
3786 }
3787
3788 /* Return the valid ldst list entry for X, or NULL if there is none. */
3789
3790 static struct ls_expr *
3791 find_rtx_in_ldst (rtx x)
3792 {
3793 struct ls_expr e;
3794 void **slot;
3795 if (!pre_ldst_table)
3796 return NULL;
3797 e.pattern = x;
3798 slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
3799 if (!slot || ((struct ls_expr *)*slot)->invalid)
3800 return NULL;
3801 return (struct ls_expr *) *slot;
3802 }
3803 \f
3804 /* Load Motion for loads which only kill themselves. */
3805
3806 /* Return true if X, a MEM, is a simple access with no side effects.
3807 These are the types of loads we consider for the ld_motion list;
3808 anything else is left to the usual alias analysis. */
3809
3810 static int
3811 simple_mem (const_rtx x)
3812 {
3813 if (MEM_VOLATILE_P (x))
3814 return 0;
3815
3816 if (GET_MODE (x) == BLKmode)
3817 return 0;
3818
3819 /* If we are handling exceptions, we must be careful with memory references
3820 that may trap. If we are not, the behavior is undefined, so we may just
3821 continue. */
3822 if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
3823 return 0;
3824
3825 if (side_effects_p (x))
3826 return 0;
3827
3828 /* Do not consider function arguments passed on the stack. */
3829 if (reg_mentioned_p (stack_pointer_rtx, x))
3830 return 0;
3831
3832 if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
3833 return 0;
3834
3835 return 1;
3836 }
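
/* Some illustrative MEMs and how the tests above classify them
   (hypothetical RTL, not taken from a real dump):

     (mem:SI (symbol_ref:SI ("i")))            accepted: simple
     (mem/v:SI (symbol_ref:SI ("port")))       rejected: volatile
     (mem:BLK (symbol_ref:SI ("buf")))         rejected: BLKmode
     (mem:SI (plus:SI (reg:SI 7 sp) (const_int 4)))
                                               rejected: mentions sp  */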
3837
3838 /* Make sure there isn't a buried reference in this pattern anywhere.
3839 If there is, invalidate the entry for it since we're not capable
3840 of fixing it up just yet. We have to be sure we know about ALL
3841 loads since the aliasing code will allow all entries in the
3842 ld_motion list to not alias one another. If we miss a load, we will get
3843 the wrong value since gcse might common it and we won't know to
3844 fix it up. */
3845
3846 static void
3847 invalidate_any_buried_refs (rtx x)
3848 {
3849 const char * fmt;
3850 int i, j;
3851 struct ls_expr * ptr;
3852
3853 /* Invalidate it in the list. */
3854 if (MEM_P (x) && simple_mem (x))
3855 {
3856 ptr = ldst_entry (x);
3857 ptr->invalid = 1;
3858 }
3859
3860 /* Recursively process the insn. */
3861 fmt = GET_RTX_FORMAT (GET_CODE (x));
3862
3863 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3864 {
3865 if (fmt[i] == 'e')
3866 invalidate_any_buried_refs (XEXP (x, i));
3867 else if (fmt[i] == 'E')
3868 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3869 invalidate_any_buried_refs (XVECEXP (x, i, j));
3870 }
3871 }
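
/* For example (hypothetical RTL), in

     (set (reg:SI 100) (plus:SI (mem:SI (symbol_ref ("i"))) (const_int 1)))

   the MEM is buried inside the PLUS rather than being the whole
   SET_SRC, so compute_ld_motion_mems below cannot track it as a simple
   load; the recursive walk above finds it and invalidates its entry.  */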
3872
3873 /* Find all the 'simple' MEMs which are used in LOADs and STORES. 'Simple'
3874 means MEM loads and stores to symbols, with no side effects
3875 and no registers in the expression. For a MEM destination, we also
3876 check that the insn is still valid if we replace the destination with a
3877 REG, as is done in update_ld_motion_stores. If there are any uses/defs
3878 which don't meet these criteria, they are invalidated and trimmed out
3879 later. */
3880
3881 static void
3882 compute_ld_motion_mems (void)
3883 {
3884 struct ls_expr * ptr;
3885 basic_block bb;
3886 rtx insn;
3887
3888 pre_ldst_mems = NULL;
3889 pre_ldst_table
3890 = htab_create (13, pre_ldst_expr_hash, pre_ldst_expr_eq, NULL);
3891
3892 FOR_EACH_BB (bb)
3893 {
3894 FOR_BB_INSNS (bb, insn)
3895 {
3896 if (NONDEBUG_INSN_P (insn))
3897 {
3898 if (GET_CODE (PATTERN (insn)) == SET)
3899 {
3900 rtx src = SET_SRC (PATTERN (insn));
3901 rtx dest = SET_DEST (PATTERN (insn));
3902
3903 /* Check for a simple LOAD... */
3904 if (MEM_P (src) && simple_mem (src))
3905 {
3906 ptr = ldst_entry (src);
3907 if (REG_P (dest))
3908 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
3909 else
3910 ptr->invalid = 1;
3911 }
3912 else
3913 {
3914 /* Make sure there isn't a buried load somewhere. */
3915 invalidate_any_buried_refs (src);
3916 }
3917
3918 /* Check for stores. Don't worry about aliased ones, they
3919 will block any movement we might do later. We only care
3920 about this exact pattern since these are the only
3921 circumstances in which we will ignore the aliasing info. */
3922 if (MEM_P (dest) && simple_mem (dest))
3923 {
3924 ptr = ldst_entry (dest);
3925
3926 if (! MEM_P (src)
3927 && GET_CODE (src) != ASM_OPERANDS
3928 /* Check for REG manually since want_to_gcse_p
3929 returns 0 for all REGs. */
3930 && can_assign_to_reg_without_clobbers_p (src))
3931 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
3932 else
3933 ptr->invalid = 1;
3934 }
3935 }
3936 else
3937 invalidate_any_buried_refs (PATTERN (insn));
3938 }
3939 }
3940 }
3941 }
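
/* A sketch of how insns are classified here (hypothetical RTL):

     (set (reg:SI 100) (mem:SI (symbol_ref ("i"))))   simple load: listed
     (set (mem:SI (symbol_ref ("i"))) (reg:SI 101))   simple store: listed
     (set (mem:SI (symbol_ref ("i"))) (asm_operands ...))
                                                      entry invalidated  */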
3942
3943 /* Remove any references that have been either invalidated or are not in the
3944 expression list for pre gcse. */
3945
3946 static void
3947 trim_ld_motion_mems (void)
3948 {
3949 struct ls_expr * * last = & pre_ldst_mems;
3950 struct ls_expr * ptr = pre_ldst_mems;
3951
3952 while (ptr != NULL)
3953 {
3954 struct expr * expr;
3955
3956 /* Delete the entry if it has been made invalid. */
3957 if (! ptr->invalid)
3958 {
3959 /* Delete if we cannot find this mem in the expression list. */
3960 unsigned int hash = ptr->hash_index % expr_hash_table.size;
3961
3962 for (expr = expr_hash_table.table[hash];
3963 expr != NULL;
3964 expr = expr->next_same_hash)
3965 if (expr_equiv_p (expr->expr, ptr->pattern))
3966 break;
3967 }
3968 else
3969 expr = (struct expr *) 0;
3970
3971 if (expr)
3972 {
3973 /* Set the expression field if we are keeping it. */
3974 ptr->expr = expr;
3975 last = & ptr->next;
3976 ptr = ptr->next;
3977 }
3978 else
3979 {
3980 *last = ptr->next;
3981 htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
3982 free_ldst_entry (ptr);
3983 ptr = * last;
3984 }
3985 }
3986
3987 /* Show the world what we've found. */
3988 if (dump_file && pre_ldst_mems != NULL)
3989 print_ldst_list (dump_file);
3990 }
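
/* The unlinking above uses the classic pointer-to-pointer idiom: LAST
   always addresses the link that points at PTR, so removal is a single
   assignment with no special case for the list head.  A self-contained
   sketch of the same idiom (hypothetical types; free is from
   <stdlib.h>):

     struct node { struct node *next; int keep; };

     static void
     filter (struct node **head)
     {
       struct node **last = head, *p;
       while ((p = *last) != NULL)
         if (p->keep)
           last = &p->next;
         else
           {
             *last = p->next;
             free (p);
           }
     }
*/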
3991
3992 /* This routine will take an expression which we are replacing with
3993 a reaching register, and update any stores that are needed if
3994 that expression is in the ld_motion list. Stores are updated by
3995 copying their SRC to the reaching register, and then storing
3996 the reaching register into the store location. This keeps the
3997 correct value in the reaching register for the loads. */
3998
3999 static void
4000 update_ld_motion_stores (struct expr * expr)
4001 {
4002 struct ls_expr * mem_ptr;
4003
4004 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
4005 {
4006 /* We could try to find just the REACHED stores, but it shouldn't
4007 matter if we set the reaching reg everywhere... some might be
4008 dead and should be eliminated later. */
4009
4010 /* We replace (set mem expr) with (set reg expr) (set mem reg)
4011 where reg is the reaching reg used in the load. We checked in
4012 compute_ld_motion_mems that we can replace (set mem expr) with
4013 (set reg expr) in that insn. */
4014 rtx list = mem_ptr->stores;
4015
4016 for ( ; list != NULL_RTX; list = XEXP (list, 1))
4017 {
4018 rtx insn = XEXP (list, 0);
4019 rtx pat = PATTERN (insn);
4020 rtx src = SET_SRC (pat);
4021 rtx reg = expr->reaching_reg;
4022 rtx copy;
4023
4024 /* If we've already copied it, continue. */
4025 if (expr->reaching_reg == src)
4026 continue;
4027
4028 if (dump_file)
4029 {
4030 fprintf (dump_file, "PRE: store updated with reaching reg ");
4031 print_rtl (dump_file, reg);
4032 fprintf (dump_file, ":\n ");
4033 print_inline_rtx (dump_file, insn, 8);
4034 fprintf (dump_file, "\n");
4035 }
4036
4037 copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
4038 emit_insn_before (copy, insn);
4039 SET_SRC (pat) = reg;
4040 df_insn_rescan (insn);
4041
4042 /* Un-recognize this pattern since it's probably different now. */
4043 INSN_CODE (insn) = -1;
4044 gcse_create_count++;
4045 }
4046 }
4047 }
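
/* On a hypothetical store, with (reg:SI 200) as the reaching register,
   the rewrite above turns

     (set (mem:SI (symbol_ref ("i"))) (plus:SI (reg:SI 100) (const_int 1)))

   into the pair

     (set (reg:SI 200) (plus:SI (reg:SI 100) (const_int 1)))
     (set (mem:SI (symbol_ref ("i"))) (reg:SI 200))

   so later loads of 'i' see the correct value in (reg:SI 200).  */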
4048 \f
4049 /* Return true if the graph is too expensive to optimize. PASS is the
4050 name of the optimization about to be performed. */
4051
4052 static bool
4053 is_too_expensive (const char *pass)
4054 {
4055 /* Trying to perform global optimizations on flow graphs which have
4056 a high connectivity will take a long time and is unlikely to be
4057 particularly useful.
4058
4059 In normal circumstances a cfg should have about twice as many
4060 edges as blocks. But we do not want to punish small functions
4061 which have a couple of switch statements. Rather than simply
4062 thresholding the number of blocks, we use something with more
4063 graceful degradation. */
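/* For instance, with the limit below a 10-block function is allowed
up to 20040 edges, while a 10000-block function is rejected only
past 60000 edges, i.e. an average of 6 edges per block; the fixed
20000 term is what keeps small, switch-heavy functions from being
penalized. */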
4064 if (n_edges > 20000 + n_basic_blocks * 4)
4065 {
4066 warning (OPT_Wdisabled_optimization,
4067 "%s: %d basic blocks and %d edges/basic block",
4068 pass, n_basic_blocks, n_edges / n_basic_blocks);
4069
4070 return true;
4071 }
4072
4073 /* If allocating memory for the dataflow bitmaps would take up too much
4074 storage it's better just to disable the optimization. */
4075 if ((n_basic_blocks
4076 * SBITMAP_SET_SIZE (max_reg_num ())
4077 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
4078 {
4079 warning (OPT_Wdisabled_optimization,
4080 "%s: %d basic blocks and %d registers",
4081 pass, n_basic_blocks, max_reg_num ());
4082
4083 return true;
4084 }
4085
4086 return false;
4087 }
4088 \f
4089 /* All the passes implemented in this file. Each pass has its
4090 own gate and execute function, and at the end of the file a
4091 pass definition for passes.c.
4092
4093 We do not construct an accurate cfg in functions which call
4094 setjmp, so none of these passes runs if the function calls
4095 setjmp.
4096 FIXME: Should just handle setjmp via REG_SETJMP notes. */
4097
4098 static bool
4099 gate_rtl_pre (void)
4100 {
4101 return optimize > 0 && flag_gcse
4102 && !cfun->calls_setjmp
4103 && optimize_function_for_speed_p (cfun)
4104 && dbg_cnt (pre);
4105 }
4106
4107 static unsigned int
4108 execute_rtl_pre (void)
4109 {
4110 int changed;
4111 delete_unreachable_blocks ();
4112 df_analyze ();
4113 changed = one_pre_gcse_pass ();
4114 flag_rerun_cse_after_global_opts |= changed;
4115 if (changed)
4116 cleanup_cfg (0);
4117 return 0;
4118 }
4119
4120 static bool
4121 gate_rtl_hoist (void)
4122 {
4123 return optimize > 0 && flag_gcse
4124 && !cfun->calls_setjmp
4125 /* It does not make sense to run code hoisting unless we are optimizing
4126 for code size -- it rarely makes programs faster, and can make them
4127 bigger if we did PRE (when optimizing for space, we don't run PRE). */
4128 && optimize_function_for_size_p (cfun)
4129 && dbg_cnt (hoist);
4130 }
4131
4132 static unsigned int
4133 execute_rtl_hoist (void)
4134 {
4135 int changed;
4136 delete_unreachable_blocks ();
4137 df_analyze ();
4138 changed = one_code_hoisting_pass ();
4139 flag_rerun_cse_after_global_opts |= changed;
4140 if (changed)
4141 cleanup_cfg (0);
4142 return 0;
4143 }
4144
4145 struct rtl_opt_pass pass_rtl_pre =
4146 {
4147 {
4148 RTL_PASS,
4149 "rtl pre", /* name */
4150 OPTGROUP_NONE, /* optinfo_flags */
4151 gate_rtl_pre, /* gate */
4152 execute_rtl_pre, /* execute */
4153 NULL, /* sub */
4154 NULL, /* next */
4155 0, /* static_pass_number */
4156 TV_PRE, /* tv_id */
4157 PROP_cfglayout, /* properties_required */
4158 0, /* properties_provided */
4159 0, /* properties_destroyed */
4160 0, /* todo_flags_start */
4161 TODO_df_finish | TODO_verify_rtl_sharing |
4162 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
4163 }
4164 };
4165
4166 struct rtl_opt_pass pass_rtl_hoist =
4167 {
4168 {
4169 RTL_PASS,
4170 "hoist", /* name */
4171 OPTGROUP_NONE, /* optinfo_flags */
4172 gate_rtl_hoist, /* gate */
4173 execute_rtl_hoist, /* execute */
4174 NULL, /* sub */
4175 NULL, /* next */
4176 0, /* static_pass_number */
4177 TV_HOIST, /* tv_id */
4178 PROP_cfglayout, /* properties_required */
4179 0, /* properties_provided */
4180 0, /* properties_destroyed */
4181 0, /* todo_flags_start */
4182 TODO_df_finish | TODO_verify_rtl_sharing |
4183 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
4184 }
4185 };
4186
4187 #include "gt-gcse.h"