/* Partial redundancy elimination / Hoisting for RTL.
   Copyright (C) 1997-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - calc rough register pressure information and use the info to drive all
     kinds of code motion (including code hoisting) in a unified way.
*/

/* References searched while implementing this.

   Compilers: Principles, Techniques, and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2, Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region? Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The Power of Assignment Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global Code Motion / Global Value Numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "print-rtl.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgcleanup.h"
#include "expr.h"
#include "params.h"
#include "intl.h"
#include "tree-pass.h"
#include "dbgcnt.h"
#include "gcse.h"
#include "gcse-common.h"
#include "function-abi.h"

/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by classic GCSE.

   Two passes of copy/constant propagation are done around PRE or hoisting
   because the first one enables more GCSE and the second one helps to clean
   up the copies that PRE and HOIST create.  This is needed more for PRE than
   for HOIST because code hoisting will try to use an existing register
   containing the common subexpression rather than create a new one.  This is
   harder to do for PRE because of the code motion (which HOIST doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found that doing copy propagation between each pass enables further
   substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Probably, multiple passes are even less efficient now than
   at the time when the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4, so I don't view it as that expensive (relatively
   speaking).

   PRE GCSE depends heavily on the second CPROP pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much as it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
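
/* To illustrate the steps above with a hypothetical example (not taken
   from real compiler output): in the diamond-shaped CFG

	b1: r1 = a + b        b2: (no computation of a + b)
	      \              /
	       b3: r3 = a + b

   "a + b" in b3 is partially redundant: it is available on the path
   through b1 but not on the path through b2.  Roughly, PRE deletes the
   computation in b3 in favor of a new pseudo rN, inserts "rN = a + b"
   on the edge b2->b3, and copies r1 into rN after the occurrence in b1,
   so that rN reaches b3 on every path.  A loop invariant looks the same
   once the loop's back edge is considered, which is why treating
   invariants as partially redundant moves them out of loops.  */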
\f
/* GCSE global vars.  */

struct target_gcse default_target_gcse;
#if SWITCHABLE_TARGET
struct target_gcse *this_target_gcse = &default_target_gcse;
#endif

/* Set to nonzero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

/* Hash table of expressions.  */

struct gcse_expr
{
  /* The expression.  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct gcse_expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct gcse_occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct gcse_occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
  /* Maximum distance in instructions this expression can travel.
     We avoid moving simple expressions for more than a few instructions
     to keep register pressure under control.
     A value of "0" removes restrictions on how far the expression can
     travel.  */
  HOST_WIDE_INT max_distance;
};

/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct gcse_occr
{
  /* Next occurrence of this expression.  */
  struct gcse_occr *next;
  /* The insn that computes the expression.  */
  rtx_insn *insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};
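
/* A sketch of the two kinds of occurrence (hypothetical block, for
   illustration only):

	insn 1:  r5 = a + b	<- anticipatable: first occurrence in
				   the block, A and B not modified
				   before it
	insn 2:  a = ...
	insn 3:  r6 = a + b	<- available: last occurrence in the
				   block, A and B not modified after it

   When an expression occurs once and its operands are untouched
   elsewhere in the block, the same insn is both the anticipatable and
   the available occurrence.  */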

typedef struct gcse_occr *occr_t;

/* Expression hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct gcse_hash_table_d
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct gcse_expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;
};

/* Expression hash table.  */
static struct gcse_hash_table_d expr_hash_table;

/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by anything except itself,
   i.e. loads and stores to a single location.
   We can then allow movement of these MEM refs with a little special
   allowance.  (All stores copy the same value to the reaching reg used
   for the loads.)  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.  */

struct ls_expr
{
  struct gcse_expr *expr;	/* Gcse expression reference for LM.  */
  rtx pattern;			/* Pattern of this mem.  */
  rtx pattern_regs;		/* List of registers mentioned by the mem.  */
  vec<rtx_insn *> stores;	/* INSN list of stores seen.  */
  struct ls_expr *next;		/* Next in the list.  */
  int invalid;			/* Invalid for some reason.  */
  int index;			/* If it maps to a bitmap index.  */
  unsigned int hash_index;	/* Index when in a hash table.  */
  rtx reaching_reg;		/* Register to use when re-writing.  */
};

/* Head of the list of load/store memory refs.  */
static struct ls_expr *pre_ldst_mems = NULL;

struct pre_ldst_expr_hasher : nofree_ptr_hash <ls_expr>
{
  typedef value_type compare_type;
  static inline hashval_t hash (const ls_expr *);
  static inline bool equal (const ls_expr *, const ls_expr *);
};

/* Hashtable helpers.  */

inline hashval_t
pre_ldst_expr_hasher::hash (const ls_expr *x)
{
  int do_not_record_p = 0;
  return
    hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
}

static int expr_equiv_p (const_rtx, const_rtx);

inline bool
pre_ldst_expr_hasher::equal (const ls_expr *ptr1,
			     const ls_expr *ptr2)
{
  return expr_equiv_p (ptr1->pattern, ptr2->pattern);
}

/* Hashtable for the load/store memory refs.  */
static hash_table<pre_ldst_expr_hasher> *pre_ldst_table;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number, of lists of insns which modify
   memory within the block.  */
static vec<rtx_insn *> *modify_mem_list;
static bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, except that it stores MEMs
   being set and their canonicalized memory addresses.  */
static vec<modify_pair> *canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;

/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;
\f
/* Doing code hoisting.  */
static bool doing_code_hoisting_p = false;
\f
/* For available exprs.  */
static sbitmap *ae_kill;
\f
/* Data stored for each basic block.  */
struct bb_data
{
  /* Maximal register pressure inside basic block for given register class
     (defined only for the pressure classes).  */
  int max_reg_pressure[N_REG_CLASSES];
  /* Recorded register pressure of basic block before trying to hoist
     an expression.  Will be used to restore the register pressure
     if the expression should not be hoisted.  */
  int old_pressure;
  /* Recorded register live_in info of basic block during code hoisting
     process.  BACKUP is used to record live_in info before trying to
     hoist an expression, and will be used to restore LIVE_IN if the
     expression should not be hoisted.  */
  bitmap live_in, backup;
};

#define BB_DATA(bb) ((struct bb_data *) (bb)->aux)

static basic_block curr_bb;

/* Current register pressure for each pressure class.  */
static int curr_reg_pressure[N_REG_CLASSES];
\f

static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_set (rtx, rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_clobber (rtx, rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_call (rtx, rtx_insn *, struct gcse_hash_table_d *);
static int oprs_unchanged_p (const_rtx, const rtx_insn *, int);
static int oprs_anticipatable_p (const_rtx, const rtx_insn *);
static int oprs_available_p (const_rtx, const rtx_insn *);
static void insert_expr_in_table (rtx, machine_mode, rtx_insn *, int, int,
				  HOST_WIDE_INT, struct gcse_hash_table_d *);
static unsigned int hash_expr (const_rtx, machine_mode, int *, int);
static void record_last_reg_set_info (rtx_insn *, int);
static void record_last_mem_set_info (rtx_insn *);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct gcse_hash_table_d *);
static void alloc_hash_table (struct gcse_hash_table_d *);
static void free_hash_table (struct gcse_hash_table_d *);
static void compute_hash_table_work (struct gcse_hash_table_d *);
static void dump_hash_table (FILE *, const char *, struct gcse_hash_table_d *);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
				      struct gcse_hash_table_d *);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static struct edge_list *compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct gcse_expr *,
				    basic_block);
static void insert_insn_end_basic_block (struct gcse_expr *, basic_block);
static void pre_insert_copy_insn (struct gcse_expr *, rtx_insn *);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (struct edge_list *);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx_insn *);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int should_hoist_expr_to_dom (basic_block, struct gcse_expr *,
				     basic_block,
				     sbitmap, HOST_WIDE_INT, int *,
				     enum reg_class,
				     int *, bitmap, rtx_insn *);
static int hoist_code (void);
static enum reg_class get_regno_pressure_class (int regno, int *nregs);
static enum reg_class get_pressure_class_and_nregs (rtx_insn *insn, int *nregs);
static int one_code_hoisting_pass (void);
static rtx_insn *process_insert_insn (struct gcse_expr *);
static int pre_edge_insert (struct edge_list *, struct gcse_expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct gcse_expr *,
					 basic_block, char *);
static struct ls_expr *ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ld_motion_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr *find_rtx_in_ldst (rtx);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct gcse_expr *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);

#define GNEW(T)			((T *) gmalloc (sizeof (T)))
#define GCNEW(T)		((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N)		((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N)		((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S)		((T *) gmalloc ((S)))
#define GCNEWVAR(T, S)		((T *) gcalloc (1, (S)))

#define GOBNEW(T)		((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S)		((T *) gcse_alloc ((S)))
\f
/* Misc. utilities.  */

#define can_copy \
  (this_target_gcse->x_can_copy)
#define can_copy_init_p \
  (this_target_gcse->x_can_copy_init_p)

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg;
  rtx_insn *insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
	can_copy[i] = 0;
#else
	reg = gen_rtx_REG ((machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
	insn = emit_insn (gen_rtx_SET (reg, reg));
	if (recog (PATTERN (insn), insn, NULL) >= 0)
	  can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}

/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (machine_mode mode)
{
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}
\f
/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}

/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = ALLOC_REG_SET (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  The two typedefs are needed to work around the
     pre-processor limitation with template types in macro arguments.  */
  typedef vec<rtx_insn *> vec_rtx_heap;
  typedef vec<modify_pair> vec_modify_pair_heap;
  modify_mem_list = GCNEWVEC (vec_rtx_heap, last_basic_block_for_fn (cfun));
  canon_modify_mem_list = GCNEWVEC (vec_modify_pair_heap,
				    last_basic_block_for_fn (cfun));
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  FREE_REG_SET (reg_set_bitmap);

  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}
\f
/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and the expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and the expression would contain the same value if the
   computation was moved to the beginning of the block.

   We call this routine for PRE and code hoisting; both compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  */

static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
			  struct gcse_hash_table_d *table)
{
  unsigned int i;

  /* Initialize the bitmaps that were passed in.  */
  if (transp)
    bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));

  if (comp)
    bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));
  if (antloc)
    bitmap_vector_clear (antloc, last_basic_block_for_fn (cfun));

  for (i = 0; i < table->size; i++)
    {
      struct gcse_expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
	{
	  int indx = expr->bitmap_index;
	  struct gcse_occr *occr;

	  /* The expression is transparent in a block if it is not killed.
	     We start by assuming all are transparent [none are killed], and
	     then reset the bits for those that are.  */
	  if (transp)
	    compute_transp (expr->expr, indx, transp,
			    blocks_with_calls,
			    modify_mem_list_set,
			    canon_modify_mem_list);

	  /* The occurrences recorded in antic_occr are exactly those that
	     we want to set to nonzero in ANTLOC.  */
	  if (antloc)
	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	      {
		bitmap_set_bit (antloc[BLOCK_FOR_INSN (occr->insn)->index],
				indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->deleted_p = 0;
	      }

	  /* The occurrences recorded in avail_occr are exactly those that
	     we want to set to nonzero in COMP.  */
	  if (comp)
	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
	      {
		bitmap_set_bit (comp[BLOCK_FOR_INSN (occr->insn)->index],
				indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->copied_p = 0;
	      }

	  /* While we're scanning the table, this is a good place to
	     initialize this.  */
	  expr->reaching_reg = 0;
	}
    }
}
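
/* A worked example of the three properties (hypothetical block, for
   illustration only).  For the block

	a = ...
	r1 = b + c
	b = ...

   and the expression "b + c": ANTLOC holds (the occurrence is the
   first in the block and B and C are unmodified before it), COMP does
   not (B is modified after the occurrence, so computing "b + c" at the
   block's end would yield a different value), and TRANSP does not
   (an operand is modified within the block).  */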
\f
/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;

/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x, machine_mode mode, HOST_WIDE_INT *max_distance_ptr)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  /* GCSE'ing constants:

     We do not specifically distinguish between constant and non-constant
     expressions in PRE and Hoist.  We use set_src_cost below to limit
     the maximum distance simple expressions can travel.

     Nevertheless, constants are much easier to GCSE, and, hence,
     it is easy to overdo the optimizations.  Usually, excessive PRE and
     Hoisting of constants leads to increased register pressure.

     RA can deal with this by rematerializing some of the constants.
     Therefore, it is important that the back-end generates sets of constants
     in a way that allows reload to rematerialize them under high register
     pressure, i.e., a pseudo register with REG_EQUAL to constant
     is set only once.  Failing to do so will result in IRA/reload
     spilling such constants under high register pressure instead of
     rematerializing them.  */

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CALL:
      return 0;

    CASE_CONST_ANY:
      if (!doing_code_hoisting_p)
	/* Do not PRE constants.  */
	return 0;

      /* FALLTHRU */

    default:
      if (doing_code_hoisting_p)
	/* PRE doesn't implement max_distance restriction.  */
	{
	  int cost;
	  HOST_WIDE_INT max_distance;

	  gcc_assert (!optimize_function_for_speed_p (cfun)
		      && optimize_function_for_size_p (cfun));
	  cost = set_src_cost (x, mode, 0);

	  if (cost < COSTS_N_INSNS (param_gcse_unrestricted_cost))
	    {
	      max_distance
		= ((HOST_WIDE_INT) param_gcse_cost_distance_ratio * cost) / 10;
	      if (max_distance == 0)
		return 0;

	      gcc_assert (max_distance > 0);
	    }
	  else
	    max_distance = 0;

	  if (max_distance_ptr)
	    *max_distance_ptr = max_distance;
	}

      return can_assign_to_reg_without_clobbers_p (x, mode);
    }
}
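
/* A worked instance of the distance computation above, assuming the
   default parameter values gcse-unrestricted-cost == 3 and
   gcse-cost-distance-ratio == 10: an expression whose set_src_cost is
   COSTS_N_INSNS (2) == 8 lies below COSTS_N_INSNS (3) == 12, so it may
   travel at most (10 * 8) / 10 == 8 insns.  A costlier expression
   instead gets max_distance == 0, which removes the restriction
   entirely.  */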

/* Used internally by can_assign_to_reg_without_clobbers_p.  */

static GTY(()) rtx_insn *test_insn;

/* Return true if we can assign X to a pseudo register of mode MODE
   such that the resulting insn does not result in clobbering a hard
   register as a side-effect.

   Additionally, if the target requires it, check that the resulting insn
   can be copied.  If it cannot, this means that X is special and probably
   has hidden side-effects we don't want to mess with.

   This function is typically used by code motion passes, to verify
   that it is safe to insert an insn without worrying about clobbering
   maybe live hard regs.  */

bool
can_assign_to_reg_without_clobbers_p (rtx x, machine_mode mode)
{
  int num_clobbers = 0;
  int icode;
  bool can_assign = false;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, mode))
    return true;
  else if (GET_MODE (x) == VOIDmode)
    return false;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
	= make_insn_raw (gen_rtx_SET (gen_rtx_REG (word_mode,
						   FIRST_PSEUDO_REGISTER * 2),
				      const0_rtx));
      SET_NEXT_INSN (test_insn) = SET_PREV_INSN (test_insn) = 0;
      INSN_LOCATION (test_insn) = UNKNOWN_LOCATION;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     it is valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), mode);
  SET_SRC (PATTERN (test_insn)) = x;

  icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);

  /* If the test insn is valid and doesn't need clobbers, and the target also
     has no objections, we're good.  */
  if (icode >= 0
      && (num_clobbers == 0 || !added_clobbers_hard_reg_p (icode))
      && ! (targetm.cannot_copy_insn_p
	    && targetm.cannot_copy_insn_p (test_insn)))
    can_assign = true;

  /* Make sure test_insn doesn't have any pointers into GC space.  */
  SET_SRC (PATTERN (test_insn)) = NULL_RTX;

  return can_assign;
}
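
/* For example (a sketch of the intended use, with a hypothetical
   candidate): a pass that wants to emit a new insn

	(set (reg:SI <new pseudo>) (plus:SI (reg:SI 100) (reg:SI 101)))

   would first call can_assign_to_reg_without_clobbers_p on the PLUS
   with SImode.  A false result means recognizing such an insn would
   require adding hard-register clobbers, or the target refuses to let
   it be copied, so inserting it at an arbitrary program point could
   corrupt live hard registers.  */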

/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (const_rtx x, const rtx_insn *insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

	if (info->last_bb != current_bb)
	  return 1;
	if (avail_p)
	  return info->last_set < DF_INSN_LUID (insn);
	else
	  return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (! flag_gcse_lm
	  || load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
				     x, avail_p))
	return 0;
      else
	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0: /*FIXME*/
    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call needed at this
	     level, change it into iteration.  This function is called enough
	     to be worth it.  */
	  if (i == 0)
	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
	    return 0;
    }

  return 1;
}
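
/* Example (hypothetical block, for illustration only):

	insn 1:  r7 = ...
	insn 2:  r8 = r7 + r9
	insn 3:  r9 = ...

   For the expression "r7 + r9" at insn 2, the AVAIL_P == 0 query
   returns 0 because r7 is first set within the block before insn 2,
   and the AVAIL_P != 0 query returns 0 because r9 is last set after
   insn 2.  Without insns 1 and 3, both queries would return 1.  */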

/* Info passed from load_killed_in_block_p to mems_conflict_for_gcse_p.  */

struct mem_conflict_info
{
  /* A memory reference for a load instruction, mems_conflict_for_gcse_p will
     see if a memory store conflicts with this memory load.  */
  const_rtx mem;

  /* True if mems_conflict_for_gcse_p finds a conflict between two memory
     references.  */
  bool conflict;
};

/* DEST is the output of an instruction.  If it is a memory reference and
   possibly conflicts with the load found in DATA, then communicate this
   information back through DATA.  */

static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			  void *data)
{
  struct mem_conflict_info *mci = (struct mem_conflict_info *) data;

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark it as killed this time.  */
  if (pre_ldst_mems != NULL && expr_equiv_p (dest, mci->mem))
    {
      if (!find_rtx_in_ldst (dest))
	mci->conflict = true;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), mci->mem))
    mci->conflict = true;
}

/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
			int avail_p)
{
  vec<rtx_insn *> list = modify_mem_list[bb->index];
  rtx_insn *setter;
  unsigned ix;

  /* If this is a readonly MEM, then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  FOR_EACH_VEC_ELT_REVERSE (list, ix, setter)
    {
      struct mem_conflict_info mci;

      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
	   && DF_INSN_LUID (setter) < uid_limit)
	  || (! avail_p
	      && DF_INSN_LUID (setter) > uid_limit))
	continue;

      /* If SETTER is a call everything is clobbered.  Note that calls
	 to pure functions are never put on the list, so we need not
	 worry about them.  */
      if (CALL_P (setter))
	return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
	 note_stores to examine each hunk of memory that is modified.  */
      mci.mem = x;
      mci.conflict = false;
      note_stores (setter, mems_conflict_for_gcse_p, &mci);
      if (mci.conflict)
	return 1;
    }
  return 0;
}
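
/* For example (hypothetical, for illustration only): a load from
   (mem (reg A)) survives a block containing a store to (mem (reg B))
   as long as true_dependence concludes the two addresses cannot
   overlap; any CALL_INSN in the relevant part of the block kills the
   load unconditionally, while const/pure calls never appear on
   modify_mem_list in the first place.  */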

/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const rtx_insn *insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const rtx_insn *insn)
{
  return oprs_unchanged_p (x, insn, 1);
}

/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, machine_mode mode, int *do_not_record_p,
	   int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p, NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Return nonzero if exp1 is equivalent to exp2.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}

/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.

   MAX_DISTANCE is the maximum distance in instructions this expression can
   be moved.  */

static void
insert_expr_in_table (rtx x, machine_mode mode, rtx_insn *insn,
		      int antic_p,
		      int avail_p, HOST_WIDE_INT max_distance,
		      struct gcse_hash_table_d *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct gcse_expr *cur_expr, *last_expr = NULL;
  struct gcse_occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && (found = expr_equiv_p (cur_expr->expr, x)) == 0)
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct gcse_expr);
      bytes_used += sizeof (struct gcse_expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      gcc_assert (max_distance >= 0);
      cur_expr->max_distance = max_distance;
    }
  else
    gcc_assert (cur_expr->max_distance == max_distance);

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr
	  && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
	antic_occr = NULL;

      if (antic_occr)
	/* Found another instance of the expression in the same basic block.
	   Prefer the currently recorded one.  We want the first one in the
	   block and the block is scanned from start to end.  */
	; /* nothing to do */
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  antic_occr = GOBNEW (struct gcse_occr);
	  bytes_used += sizeof (struct gcse_occr);
	  antic_occr->insn = insn;
	  antic_occr->next = cur_expr->antic_occr;
	  antic_occr->deleted_p = 0;
	  cur_expr->antic_occr = antic_occr;
	}
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr
	  && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
	{
	  /* Found another instance of the expression in the same basic block.
	     Prefer this occurrence to the currently recorded one.  We want
	     the last one in the block and the block is scanned from start
	     to end.  */
	  avail_occr->insn = insn;
	}
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  avail_occr = GOBNEW (struct gcse_occr);
	  bytes_used += sizeof (struct gcse_occr);
	  avail_occr->insn = insn;
	  avail_occr->next = cur_expr->avail_occr;
	  avail_occr->deleted_p = 0;
	  cur_expr->avail_occr = avail_occr;
	}
    }
}
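
/* Example of the occurrence bookkeeping above (hypothetical block):

	insn 1:  r5 = a + b
	insn 2:  r6 = a + b

   Both insns map to the same gcse_expr.  For the antic_occr list the
   first occurrence wins, so insn 1 is kept; for avail_occr the last
   occurrence wins, so the recorded insn is overwritten with insn 2.
   This matches the definitions in struct gcse_expr above.  */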

/* Scan SET present in INSN and add an entry to the hash TABLE.  */

static void
hash_scan_set (rtx set, rtx_insn *insn, struct gcse_hash_table_d *table)
{
  rtx src = SET_SRC (set);
  rtx dest = SET_DEST (set);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      HOST_WIDE_INT max_distance = 0;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

	 This allows us to do a single GCSE pass and still eliminate
	 redundant constants, addresses or other expressions that are
	 constructed with multiple instructions.

	 However, keep the original SRC if INSN is a simple reg-reg move.
	 In this case, there will almost always be a REG_EQUAL note on the
	 insn that sets SRC.  By recording the REG_EQUAL value here as SRC
	 for INSN, we miss copy propagation opportunities and we perform the
	 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
	 do more than one PRE GCSE pass.

	 Note that this does not impede profitable constant propagations.  We
	 "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
	  && REG_NOTE_KIND (note) == REG_EQUAL
	  && !REG_P (src)
	  && want_to_gcse_p (XEXP (note, 0), GET_MODE (dest), NULL))
	src = XEXP (note, 0), set = gen_rtx_SET (dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (dest))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  /* ??? We can now easily create new EH landing pads at the
	     gimple level, for splitting edges; there's no reason we
	     can't do the same thing at the rtl level.  */
	  && !can_throw_internal (insn)
	  /* Is SET_SRC something we want to gcse?  */
	  && want_to_gcse_p (src, GET_MODE (dest), &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes, and if the argument slot is used somewhere
	     explicitly, it means the address of the parameter has been
	     taken, so we should not extend the lifetime of the pseudo.  */
	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
	{
	  /* An expression is not anticipatable if its operands are
	     modified before this insn or if this is not the only SET in
	     this insn.  The latter condition does not have to mean that
	     SRC itself is not anticipatable, but we just will not be
	     able to handle code motion of insns with multiple sets.  */
	  int antic_p = oprs_anticipatable_p (src, insn)
			&& !multiple_sets (insn);
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = (oprs_available_p (src, insn)
			 && ! JUMP_P (insn));

	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
				max_distance, table);
	}
    }
  /* In case of store we want to consider the memory value as available in
     the REG stored in that memory.  This makes it possible to remove
     redundant loads due to stores to the same location.  */
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);
      HOST_WIDE_INT max_distance = 0;

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (src))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  && !can_throw_internal (insn)
	  /* Is SET_DEST something we want to gcse?  */
	  && want_to_gcse_p (dest, GET_MODE (dest), &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes, and if the argument slot is used somewhere
	     explicitly, it means the address of the parameter has been
	     taken, so we should not extend the lifetime of the pseudo.  */
	  && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
	      || ! MEM_P (XEXP (note, 0))))
	{
	  /* Stores are never anticipatable.  */
	  int antic_p = 0;
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = oprs_available_p (dest, insn) && ! JUMP_P (insn);

	  /* Record the memory expression (DEST) in the hash table.  */
	  insert_expr_in_table (dest, GET_MODE (dest), insn,
				antic_p, avail_p, max_distance, table);
	}
    }
}
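
/* A sketch of the REG_EQUAL case above (hypothetical HIGH/LO_SUM
   address computation, as on many PIC targets):

	insn 1:  (set (reg 100) (high (symbol_ref "x")))
	insn 2:  (set (reg 101) (lo_sum (reg 100) (symbol_ref "x")))
		 with a REG_EQUAL note holding (symbol_ref "x")

   Recording (symbol_ref "x") as the source of insn 2 lets PRE treat
   the whole two-insn address computation as one expression, even
   though no single SET computes it directly.  */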

static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
		   struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
		struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

/* Process INSN and add hash table entries as appropriate.  */

static void
hash_scan_insn (rtx_insn *insn, struct gcse_hash_table_d *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);

  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);

  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
	rtx x = XVECEXP (pat, 0, i);

	if (GET_CODE (x) == SET)
	  hash_scan_set (x, insn, table);
	else if (GET_CODE (x) == CLOBBER)
	  hash_scan_clobber (x, insn, table);
	else if (GET_CODE (x) == CALL)
	  hash_scan_call (x, insn, table);
      }
}

/* Dump the hash table TABLE to file FILE under the name NAME.  */

static void
dump_hash_table (FILE *file, const char *name, struct gcse_hash_table_d *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct gcse_expr **flat_table;
  unsigned int *hash_val;
  struct gcse_expr *expr;

  flat_table = XCNEWVEC (struct gcse_expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
	flat_table[expr->bitmap_index] = expr;
	hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
	   name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
	expr = flat_table[i];
	fprintf (file, "Index %d (hash value %d; max distance "
		 HOST_WIDE_INT_PRINT_DEC ")\n  ",
		 expr->bitmap_index, hash_val[i], expr->max_distance);
	print_rtl (file, expr->expr);
	fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}

/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */

static void
record_last_reg_set_info (rtx_insn *insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}
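
/* For example (illustrative numbers): if register 42 is set by the
   insns with LUIDs 3 and 7 in the current block, the record ends up
   with first_set == 3 and last_set == 7.  oprs_unchanged_p then
   treats an expression using reg 42 as anticipatable only at insns
   with LUID <= 3 and as available only at insns with LUID > 7.  */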

/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set
   (consider a CALL_INSN).  We merely need to record which insns modify
   memory.  */

static void
record_last_mem_set_info (rtx_insn *insn)
{
  if (! flag_gcse_lm)
    return;

  record_last_mem_set_info_common (insn, modify_mem_list,
				   canon_modify_mem_list,
				   modify_mem_list_set,
				   blocks_with_calls);
}

/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx_insn *last_set_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}

/* Top level function to create an expression hash table.

   Expression entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform GCSE on,
   - none of the operands are subsequently modified in the block.

   Currently src must be a pseudo-reg or a const_int.

   TABLE is the table computed.  */

static void
compute_hash_table_work (struct gcse_hash_table_d *table)
{
  int i;

  /* Re-cache any INSN_LIST nodes we have allocated.  */
  clear_modify_mem_tables ();
  /* Some working arrays used to track first and last set in each block.  */
  reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());

  for (i = 0; i < max_reg_num (); ++i)
    reg_avail_info[i].last_bb = NULL;

  FOR_EACH_BB_FN (current_bb, cfun)
    {
      rtx_insn *insn;
      unsigned int regno;

      /* First pass over the instructions records information used to
	 determine when registers and memory are first and last set.  */
      FOR_BB_INSNS (current_bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  if (CALL_P (insn))
	    {
	      hard_reg_set_iterator hrsi;

	      /* We don't track modes of hard registers, so we need
		 to be conservative and assume that partial kills
		 are full kills.  */
	      HARD_REG_SET callee_clobbers
		= insn_callee_abi (insn).full_and_partial_reg_clobbers ();
	      EXECUTE_IF_SET_IN_HARD_REG_SET (callee_clobbers, 0, regno, hrsi)
		record_last_reg_set_info (insn, regno);

	      if (! RTL_CONST_OR_PURE_CALL_P (insn)
		  || RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
		record_last_mem_set_info (insn);
	    }

	  note_stores (insn, record_last_set_info, insn);
	}

      /* The next pass builds the hash table.  */
      FOR_BB_INSNS (current_bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  hash_scan_insn (insn, table);
    }

  free (reg_avail_info);
  reg_avail_info = NULL;
}

/* Allocate space for the set/expr hash TABLE.
   The number of insns in the function is used to determine the number
   of buckets to use.  */

static void
alloc_hash_table (struct gcse_hash_table_d *table)
{
  int n;

  n = get_max_insn_count ();

  table->size = n / 4;
  if (table->size < 11)
    table->size = 11;

  /* Attempt to maintain efficient use of hash table.
     Making it an odd number is simplest for now.
     ??? Later take some measurements.  */
  table->size |= 1;
  n = table->size * sizeof (struct gcse_expr *);
  table->table = GNEWVAR (struct gcse_expr *, n);
}
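
/* For example (illustrative numbers only): a function with 1000 insns
   gets table->size == (1000 / 4) | 1 == 251 buckets, while very small
   functions are clamped to the 11-bucket minimum.  */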
1580
1581 /* Free things allocated by alloc_hash_table. */
1582
1583 static void
1584 free_hash_table (struct gcse_hash_table_d *table)
1585 {
1586 free (table->table);
1587 }
1588
1589 /* Compute the expression hash table TABLE. */
1590
1591 static void
1592 compute_hash_table (struct gcse_hash_table_d *table)
1593 {
1594 /* Initialize count of number of entries in hash table. */
1595 table->n_elems = 0;
1596 memset (table->table, 0, table->size * sizeof (struct gcse_expr *));
1597
1598 compute_hash_table_work (table);
1599 }
1600 \f
1601 /* Expression tracking support. */
1602
1603 /* Clear canon_modify_mem_list and modify_mem_list tables. */
1604 static void
1605 clear_modify_mem_tables (void)
1606 {
1607 unsigned i;
1608 bitmap_iterator bi;
1609
1610 EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
1611 {
1612 modify_mem_list[i].release ();
1613 canon_modify_mem_list[i].release ();
1614 }
1615 bitmap_clear (modify_mem_list_set);
1616 bitmap_clear (blocks_with_calls);
1617 }
1618
1619 /* Release memory used by modify_mem_list_set. */
1620
1621 static void
1622 free_modify_mem_tables (void)
1623 {
1624 clear_modify_mem_tables ();
1625 free (modify_mem_list);
1626 free (canon_modify_mem_list);
1627 modify_mem_list = 0;
1628 canon_modify_mem_list = 0;
1629 }
1630 \f
1631 /* Compute PRE+LCM working variables. */
1632
1633 /* Local properties of expressions. */
1634
1635 /* Nonzero for expressions that are transparent in the block. */
1636 static sbitmap *transp;
1637
1638 /* Nonzero for expressions that are computed (available) in the block. */
1639 static sbitmap *comp;
1640
1641 /* Nonzero for expressions that are locally anticipatable in the block. */
1642 static sbitmap *antloc;
1643
1644 /* Nonzero for expressions where this block is an optimal computation
1645 point. */
1646 static sbitmap *pre_optimal;
1647
1648 /* Nonzero for expressions which are redundant in a particular block. */
1649 static sbitmap *pre_redundant;
1650
1651 /* Nonzero for expressions which should be inserted on a specific edge. */
1652 static sbitmap *pre_insert_map;
1653
1654 /* Nonzero for expressions which should be deleted in a specific block. */
1655 static sbitmap *pre_delete_map;
1656
1657 /* Allocate vars used for PRE analysis. */
1658
1659 static void
1660 alloc_pre_mem (int n_blocks, int n_exprs)
1661 {
1662 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
1663 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
1664 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
1665
1666 pre_optimal = NULL;
1667 pre_redundant = NULL;
1668 pre_insert_map = NULL;
1669 pre_delete_map = NULL;
1670 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
1671
1672 /* pre_insert and pre_delete are allocated later. */
1673 }
1674
1675 /* Free vars used for PRE analysis. */
1676
1677 static void
1678 free_pre_mem (void)
1679 {
1680 sbitmap_vector_free (transp);
1681 sbitmap_vector_free (comp);
1682
1683 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
1684
1685 if (pre_optimal)
1686 sbitmap_vector_free (pre_optimal);
1687 if (pre_redundant)
1688 sbitmap_vector_free (pre_redundant);
1689 if (pre_insert_map)
1690 sbitmap_vector_free (pre_insert_map);
1691 if (pre_delete_map)
1692 sbitmap_vector_free (pre_delete_map);
1693
1694 transp = comp = NULL;
1695 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
1696 }
1697
1698 /* Remove certain expressions from anticipatable and transparent
1699    sets of basic blocks that have an incoming abnormal edge.
1700 For PRE remove potentially trapping expressions to avoid placing
1701 them on abnormal edges. For hoisting remove memory references that
1702 can be clobbered by calls. */
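/* For example, an integer division may trap on a zero divisor, so
   may_trap_p flags it and it is pruned below rather than risk being
   placed on an abnormal edge where no insn can be inserted.  */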
1703
1704 static void
1705 prune_expressions (bool pre_p)
1706 {
1707 struct gcse_expr *expr;
1708 unsigned int ui;
1709 basic_block bb;
1710
1711 auto_sbitmap prune_exprs (expr_hash_table.n_elems);
1712 bitmap_clear (prune_exprs);
1713 for (ui = 0; ui < expr_hash_table.size; ui++)
1714 {
1715 for (expr = expr_hash_table.table[ui]; expr; expr = expr->next_same_hash)
1716 {
1717 /* Note potentially trapping expressions. */
1718 if (may_trap_p (expr->expr))
1719 {
1720 bitmap_set_bit (prune_exprs, expr->bitmap_index);
1721 continue;
1722 }
1723
1724 if (!pre_p && contains_mem_rtx_p (expr->expr))
1725 /* Note memory references that can be clobbered by a call.
1726          We do not split abnormal edges in hoisting, so if a
1727          memory reference were hoisted along an abnormal edge,
1728          it would be placed /before/ the call.  Therefore, only
1729 constant memory references can be hoisted along abnormal
1730 edges. */
1731 {
1732 rtx x = expr->expr;
1733
1734 /* Common cases where we might find the MEM which may allow us
1735 to avoid pruning the expression. */
1736 while (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
1737 x = XEXP (x, 0);
1738
1739 /* If we found the MEM, go ahead and look at it to see if it has
1740 properties that allow us to avoid pruning its expression out
1741 of the tables. */
1742 if (MEM_P (x))
1743 {
1744 if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
1745 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
1746 continue;
1747
1748 if (MEM_READONLY_P (x)
1749 && !MEM_VOLATILE_P (x)
1750 && MEM_NOTRAP_P (x))
1751 /* Constant memory reference, e.g., a PIC address. */
1752 continue;
1753 }
1754
1755 /* ??? Optimally, we would use interprocedural alias
1756 analysis to determine if this mem is actually killed
1757 by this call. */
1758
1759 bitmap_set_bit (prune_exprs, expr->bitmap_index);
1760 }
1761 }
1762 }
1763
1764 FOR_EACH_BB_FN (bb, cfun)
1765 {
1766 edge e;
1767 edge_iterator ei;
1768
1769 /* If the current block is the destination of an abnormal edge, we
1770 kill all trapping (for PRE) and memory (for hoist) expressions
1771 because we won't be able to properly place the instruction on
1772 the edge. So make them neither anticipatable nor transparent.
1773 This is fairly conservative.
1774
1775 ??? For hoisting it may be necessary to check for set-and-jump
1776 instructions here, not just for abnormal edges. The general problem
1777      is that when an expression cannot be placed right at the end of
1778      a basic block, we should account for any side-effects of a subsequent
1779      jump instruction that could clobber the expression.  It would
1780 be best to implement this check along the lines of
1781 should_hoist_expr_to_dom where the target block is already known
1782 and, hence, there's no need to conservatively prune expressions on
1783 "intermediate" set-and-jump instructions. */
1784 FOR_EACH_EDGE (e, ei, bb->preds)
1785 if ((e->flags & EDGE_ABNORMAL)
1786 && (pre_p || CALL_P (BB_END (e->src))))
1787 {
1788 bitmap_and_compl (antloc[bb->index],
1789 antloc[bb->index], prune_exprs);
1790 bitmap_and_compl (transp[bb->index],
1791 transp[bb->index], prune_exprs);
1792 break;
1793 }
1794 }
1795 }
1796
1797 /* It may be necessary to insert a large number of insns on edges to
1798 make the existing occurrences of expressions fully redundant. This
1799 routine examines the set of insertions and deletions and if the ratio
1800 of insertions to deletions is too high for a particular expression, then
1801 the expression is removed from the insertion/deletion sets.
1802
1803 N_ELEMS is the number of elements in the hash table. */
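/* Illustration, assuming the default --param max-gcse-insertion-ratio
   of 20: an expression that needs 25 edge insertions to make a single
   occurrence redundant exceeds the ratio and is dropped from both the
   insertion and deletion maps.  */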
1804
1805 static void
1806 prune_insertions_deletions (int n_elems)
1807 {
1808 sbitmap_iterator sbi;
1809
1810 /* We always use I to iterate over blocks/edges and J to iterate over
1811 expressions. */
1812 unsigned int i, j;
1813
1814 /* Counts for the number of times an expression needs to be inserted and
1815 number of times an expression can be removed as a result. */
1816 int *insertions = GCNEWVEC (int, n_elems);
1817 int *deletions = GCNEWVEC (int, n_elems);
1818
1819 /* Set of expressions which require too many insertions relative to
1820 the number of deletions achieved. We will prune these out of the
1821 insertion/deletion sets. */
1822 auto_sbitmap prune_exprs (n_elems);
1823 bitmap_clear (prune_exprs);
1824
1825 /* Iterate over the edges counting the number of times each expression
1826 needs to be inserted. */
1827 for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1828 {
1829 EXECUTE_IF_SET_IN_BITMAP (pre_insert_map[i], 0, j, sbi)
1830 insertions[j]++;
1831 }
1832
1833 /* Similarly for deletions, but those occur in blocks rather than on
1834 edges. */
1835 for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1836 {
1837 EXECUTE_IF_SET_IN_BITMAP (pre_delete_map[i], 0, j, sbi)
1838 deletions[j]++;
1839 }
1840
1841 /* Now that we have accurate counts, iterate over the elements in the
1842 hash table and see if any need too many insertions relative to the
1843 number of evaluations that can be removed. If so, mark them in
1844 PRUNE_EXPRS. */
1845 for (j = 0; j < (unsigned) n_elems; j++)
1846 if (deletions[j]
1847 && (insertions[j] / deletions[j]) > param_max_gcse_insertion_ratio)
1848 bitmap_set_bit (prune_exprs, j);
1849
1850 /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS. */
1851 EXECUTE_IF_SET_IN_BITMAP (prune_exprs, 0, j, sbi)
1852 {
1853 for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1854 bitmap_clear_bit (pre_insert_map[i], j);
1855
1856 for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1857 bitmap_clear_bit (pre_delete_map[i], j);
1858 }
1859
1860 free (insertions);
1861 free (deletions);
1862 }
1863
1864 /* Top level routine to do the dataflow analysis needed by PRE. */
1865
1866 static struct edge_list *
1867 compute_pre_data (void)
1868 {
1869 struct edge_list *edge_list;
1870 basic_block bb;
1871
1872 compute_local_properties (transp, comp, antloc, &expr_hash_table);
1873 prune_expressions (true);
1874 bitmap_vector_clear (ae_kill, last_basic_block_for_fn (cfun));
1875
1876 /* Compute ae_kill for each basic block using:
1877
1878 ~(TRANSP | COMP)
1879 */
1880
1881 FOR_EACH_BB_FN (bb, cfun)
1882 {
1883 bitmap_ior (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
1884 bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
1885 }
1886
1887 edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
1888 ae_kill, &pre_insert_map, &pre_delete_map);
1889 sbitmap_vector_free (antloc);
1890 antloc = NULL;
1891 sbitmap_vector_free (ae_kill);
1892 ae_kill = NULL;
1893
1894 prune_insertions_deletions (expr_hash_table.n_elems);
1895
1896 return edge_list;
1897 }
1898 \f
1899 /* PRE utilities */
1900
1901 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
1902 block BB.
1903
1904 VISITED is a pointer to a working buffer for tracking which BB's have
1905 been visited. It is NULL for the top-level call.
1906
1907 We treat reaching expressions that go through blocks containing the same
1908 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
1909 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
1910 2 as not reaching. The intent is to improve the probability of finding
1911 only one reaching expression and to reduce register lifetimes by picking
1912 the closest such expression. */
1913
1914 static int
1915 pre_expr_reaches_here_p_work (basic_block occr_bb, struct gcse_expr *expr,
1916 basic_block bb, char *visited)
1917 {
1918 edge pred;
1919 edge_iterator ei;
1920
1921 FOR_EACH_EDGE (pred, ei, bb->preds)
1922 {
1923 basic_block pred_bb = pred->src;
1924
1925 if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1926          /* Has this predecessor already been visited?  */
1927 || visited[pred_bb->index])
1928 ;/* Nothing to do. */
1929
1930 /* Does this predecessor generate this expression? */
1931 else if (bitmap_bit_p (comp[pred_bb->index], expr->bitmap_index))
1932 {
1933 /* Is this the occurrence we're looking for?
1934 Note that there's only one generating occurrence per block
1935 so we just need to check the block number. */
1936 if (occr_bb == pred_bb)
1937 return 1;
1938
1939 visited[pred_bb->index] = 1;
1940 }
1941 /* Ignore this predecessor if it kills the expression. */
1942 else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
1943 visited[pred_bb->index] = 1;
1944
1945 /* Neither gen nor kill. */
1946 else
1947 {
1948 visited[pred_bb->index] = 1;
1949 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
1950 return 1;
1951 }
1952 }
1953
1954 /* All paths have been checked. */
1955 return 0;
1956 }
1957
1958 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
1959 memory allocated for that function is returned. */
1960
1961 static int
1962 pre_expr_reaches_here_p (basic_block occr_bb, struct gcse_expr *expr, basic_block bb)
1963 {
1964 int rval;
1965 char *visited = XCNEWVEC (char, last_basic_block_for_fn (cfun));
1966
1967 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
1968
1969 free (visited);
1970 return rval;
1971 }
1972 \f
1973 /* Generate RTL to copy an EXP to REG and return it. */
1974
1975 rtx_insn *
1976 prepare_copy_insn (rtx reg, rtx exp)
1977 {
1978 rtx_insn *pat;
1979
1980 start_sequence ();
1981
1982 /* If the expression is something that's an operand, like a constant,
1983 just copy it to a register. */
1984 if (general_operand (exp, GET_MODE (reg)))
1985 emit_move_insn (reg, exp);
1986
1987 /* Otherwise, make a new insn to compute this expression and make sure the
1988 insn will be recognized (this also adds any needed CLOBBERs). */
1989 else
1990 {
1991 rtx_insn *insn = emit_insn (gen_rtx_SET (reg, exp));
1992
1993 if (insn_invalid_p (insn, false))
1994 gcc_unreachable ();
1995 }
1996
1997 pat = get_insns ();
1998 end_sequence ();
1999
2000 return pat;
2001 }
2002
2003 /* Generate RTL to copy an EXPR to its `reaching_reg' and return it. */
2004
2005 static rtx_insn *
2006 process_insert_insn (struct gcse_expr *expr)
2007 {
2008 rtx reg = expr->reaching_reg;
2009 /* Copy the expression to make sure we don't have any sharing issues. */
2010 rtx exp = copy_rtx (expr->expr);
2011
2012 return prepare_copy_insn (reg, exp);
2013 }
2014
2015 /* Add EXPR to the end of basic block BB.
2016
2017    This is used by both PRE and code hoisting.  */
2018
2019 static void
2020 insert_insn_end_basic_block (struct gcse_expr *expr, basic_block bb)
2021 {
2022 rtx_insn *insn = BB_END (bb);
2023 rtx_insn *new_insn;
2024 rtx reg = expr->reaching_reg;
2025 int regno = REGNO (reg);
2026 rtx_insn *pat, *pat_end;
2027
2028 pat = process_insert_insn (expr);
2029 gcc_assert (pat && INSN_P (pat));
2030
2031 pat_end = pat;
2032 while (NEXT_INSN (pat_end) != NULL_RTX)
2033 pat_end = NEXT_INSN (pat_end);
2034
2035 /* If the last insn is a jump, insert EXPR in front [taking care to
2036      handle cc0, etc. properly].  Similarly we must take care with
2037      trapping instructions in the presence of non-call exceptions.  */
2038
2039 if (JUMP_P (insn)
2040 || (NONJUMP_INSN_P (insn)
2041 && (!single_succ_p (bb)
2042 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
2043 {
2044 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
2045 if cc0 isn't set. */
2046 if (HAVE_cc0)
2047 {
2048 rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
2049 if (note)
2050 insn = safe_as_a <rtx_insn *> (XEXP (note, 0));
2051 else
2052 {
2053 rtx_insn *maybe_cc0_setter = prev_nonnote_insn (insn);
2054 if (maybe_cc0_setter
2055 && INSN_P (maybe_cc0_setter)
2056 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
2057 insn = maybe_cc0_setter;
2058 }
2059 }
2060
2061 /* FIXME: What if something in cc0/jump uses value set in new insn? */
2062 new_insn = emit_insn_before_noloc (pat, insn, bb);
2063 }
2064
2065 /* Likewise if the last insn is a call, as will happen in the presence
2066 of exception handling. */
2067 else if (CALL_P (insn)
2068 && (!single_succ_p (bb)
2069 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
2070 {
2071 /* Keeping in mind targets with small register classes and parameters
2072 in registers, we search backward and place the instructions before
2073         the first parameter is loaded.  Do this for all targets, for consistency
2074         and on the presumption that we'll get better code elsewhere as well.  */
2075
2076 /* Since different machines initialize their parameter registers
2077 in different orders, assume nothing. Collect the set of all
2078 parameter registers. */
2079 insn = find_first_parameter_load (insn, BB_HEAD (bb));
2080
2081 /* If we found all the parameter loads, then we want to insert
2082 before the first parameter load.
2083
2084 If we did not find all the parameter loads, then we might have
2085 stopped on the head of the block, which could be a CODE_LABEL.
2086 If we inserted before the CODE_LABEL, then we would be putting
2087 the insn in the wrong basic block. In that case, put the insn
2088 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
2089 while (LABEL_P (insn)
2090 || NOTE_INSN_BASIC_BLOCK_P (insn))
2091 insn = NEXT_INSN (insn);
2092
2093 new_insn = emit_insn_before_noloc (pat, insn, bb);
2094 }
2095 else
2096 new_insn = emit_insn_after_noloc (pat, insn, bb);
2097
2098 while (1)
2099 {
2100 if (INSN_P (pat))
2101 add_label_notes (PATTERN (pat), new_insn);
2102 if (pat == pat_end)
2103 break;
2104 pat = NEXT_INSN (pat);
2105 }
2106
2107 gcse_create_count++;
2108
2109 if (dump_file)
2110 {
2111 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
2112 bb->index, INSN_UID (new_insn));
2113 fprintf (dump_file, "copying expression %d to reg %d\n",
2114 expr->bitmap_index, regno);
2115 }
2116 }
2117
2118 /* Insert partially redundant expressions on edges in the CFG to make
2119 the expressions fully redundant. */
2120
2121 static int
2122 pre_edge_insert (struct edge_list *edge_list, struct gcse_expr **index_map)
2123 {
2124 int e, i, j, num_edges, set_size, did_insert = 0;
2125 sbitmap *inserted;
2126
2127 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
2128 if it reaches any of the deleted expressions. */
2129
2130 set_size = pre_insert_map[0]->size;
2131 num_edges = NUM_EDGES (edge_list);
2132 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
2133 bitmap_vector_clear (inserted, num_edges);
2134
2135 for (e = 0; e < num_edges; e++)
2136 {
2137 int indx;
2138 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
2139
2140 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
2141 {
2142 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
2143
2144 for (j = indx;
2145 insert && j < (int) expr_hash_table.n_elems;
2146 j++, insert >>= 1)
2147 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
2148 {
2149 struct gcse_expr *expr = index_map[j];
2150 struct gcse_occr *occr;
2151
2152 /* Now look at each deleted occurrence of this expression. */
2153 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2154 {
2155 if (! occr->deleted_p)
2156 continue;
2157
2158 /* Insert this expression on this edge if it would
2159 reach the deleted occurrence in BB. */
2160 if (!bitmap_bit_p (inserted[e], j))
2161 {
2162 rtx_insn *insn;
2163 edge eg = INDEX_EDGE (edge_list, e);
2164
2165 /* We can't insert anything on an abnormal and
2166 critical edge, so we insert the insn at the end of
2167 the previous block. There are several alternatives
2168                      detailed in Morgan's book, p. 277 (sec. 10.5), for
2169 handling this situation. This one is easiest for
2170 now. */
2171
2172 if (eg->flags & EDGE_ABNORMAL)
2173 insert_insn_end_basic_block (index_map[j], bb);
2174 else
2175 {
2176 insn = process_insert_insn (index_map[j]);
2177 insert_insn_on_edge (insn, eg);
2178 }
2179
2180 if (dump_file)
2181 {
2182 fprintf (dump_file, "PRE: edge (%d,%d), ",
2183 bb->index,
2184 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
2185 fprintf (dump_file, "copy expression %d\n",
2186 expr->bitmap_index);
2187 }
2188
2189 update_ld_motion_stores (expr);
2190 bitmap_set_bit (inserted[e], j);
2191 did_insert = 1;
2192 gcse_create_count++;
2193 }
2194 }
2195 }
2196 }
2197 }
2198
2199 sbitmap_vector_free (inserted);
2200 return did_insert;
2201 }
2202
2203 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
2204 Given "old_reg <- expr" (INSN), instead of adding after it
2205 reaching_reg <- old_reg
2206 it's better to do the following:
2207 reaching_reg <- expr
2208 old_reg <- reaching_reg
2209 because this way copy propagation can discover additional PRE
2210 opportunities. But if this fails, we try the old way.
2211 When "expr" is a store, i.e.
2212 given "MEM <- old_reg", instead of adding after it
2213 reaching_reg <- old_reg
2214 it's better to add it before as follows:
2215 reaching_reg <- old_reg
2216 MEM <- reaching_reg. */
2217
2218 static void
2219 pre_insert_copy_insn (struct gcse_expr *expr, rtx_insn *insn)
2220 {
2221 rtx reg = expr->reaching_reg;
2222 int regno = REGNO (reg);
2223 int indx = expr->bitmap_index;
2224 rtx pat = PATTERN (insn);
2225 rtx set, first_set;
2226 rtx_insn *new_insn;
2227 rtx old_reg;
2228 int i;
2229
2230 /* This block matches the logic in hash_scan_insn. */
2231 switch (GET_CODE (pat))
2232 {
2233 case SET:
2234 set = pat;
2235 break;
2236
2237 case PARALLEL:
2238 /* Search through the parallel looking for the set whose
2239 source was the expression that we're interested in. */
2240 first_set = NULL_RTX;
2241 set = NULL_RTX;
2242 for (i = 0; i < XVECLEN (pat, 0); i++)
2243 {
2244 rtx x = XVECEXP (pat, 0, i);
2245 if (GET_CODE (x) == SET)
2246 {
2247 /* If the source was a REG_EQUAL or REG_EQUIV note, we
2248 may not find an equivalent expression, but in this
2249 case the PARALLEL will have a single set. */
2250 if (first_set == NULL_RTX)
2251 first_set = x;
2252 if (expr_equiv_p (SET_SRC (x), expr->expr))
2253 {
2254 set = x;
2255 break;
2256 }
2257 }
2258 }
2259
2260 gcc_assert (first_set);
2261 if (set == NULL_RTX)
2262 set = first_set;
2263 break;
2264
2265 default:
2266 gcc_unreachable ();
2267 }
2268
2269 if (REG_P (SET_DEST (set)))
2270 {
2271 old_reg = SET_DEST (set);
2272 /* Check if we can modify the set destination in the original insn. */
2273 if (validate_change (insn, &SET_DEST (set), reg, 0))
2274 {
2275 new_insn = gen_move_insn (old_reg, reg);
2276 new_insn = emit_insn_after (new_insn, insn);
2277 }
2278 else
2279 {
2280 new_insn = gen_move_insn (reg, old_reg);
2281 new_insn = emit_insn_after (new_insn, insn);
2282 }
2283 }
2284 else /* This is possible only in case of a store to memory. */
2285 {
2286 old_reg = SET_SRC (set);
2287 new_insn = gen_move_insn (reg, old_reg);
2288
2289 /* Check if we can modify the set source in the original insn. */
2290 if (validate_change (insn, &SET_SRC (set), reg, 0))
2291 new_insn = emit_insn_before (new_insn, insn);
2292 else
2293 new_insn = emit_insn_after (new_insn, insn);
2294 }
2295
2296 gcse_create_count++;
2297
2298 if (dump_file)
2299 fprintf (dump_file,
2300 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
2301 BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
2302 INSN_UID (insn), regno);
2303 }
2304
2305 /* Copy available expressions that reach the redundant expression
2306 to `reaching_reg'. */
2307
2308 static void
2309 pre_insert_copies (void)
2310 {
2311 unsigned int i, added_copy;
2312 struct gcse_expr *expr;
2313 struct gcse_occr *occr;
2314 struct gcse_occr *avail;
2315
2316 /* For each available expression in the table, copy the result to
2317 `reaching_reg' if the expression reaches a deleted one.
2318
2319 ??? The current algorithm is rather brute force.
2320 Need to do some profiling. */
2321
2322 for (i = 0; i < expr_hash_table.size; i++)
2323 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2324 {
2325 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
2326 we don't want to insert a copy here because the expression may not
2327 really be redundant. So only insert an insn if the expression was
2328 deleted. This test also avoids further processing if the
2329 expression wasn't deleted anywhere. */
2330 if (expr->reaching_reg == NULL)
2331 continue;
2332
2333 /* Set when we add a copy for that expression. */
2334 added_copy = 0;
2335
2336 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2337 {
2338 if (! occr->deleted_p)
2339 continue;
2340
2341 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
2342 {
2343 rtx_insn *insn = avail->insn;
2344
2345 /* No need to handle this one if handled already. */
2346 if (avail->copied_p)
2347 continue;
2348
2349 /* Don't handle this one if it's a redundant one. */
2350 if (insn->deleted ())
2351 continue;
2352
2353 /* Or if the expression doesn't reach the deleted one. */
2354 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
2355 expr,
2356 BLOCK_FOR_INSN (occr->insn)))
2357 continue;
2358
2359 added_copy = 1;
2360
2361 /* Copy the result of avail to reaching_reg. */
2362 pre_insert_copy_insn (expr, insn);
2363 avail->copied_p = 1;
2364 }
2365 }
2366
2367 if (added_copy)
2368 update_ld_motion_stores (expr);
2369 }
2370 }
2371
2372 struct set_data
2373 {
2374 rtx_insn *insn;
2375 const_rtx set;
2376 int nsets;
2377 };
2378
2379 /* Increment number of sets and record set in DATA. */
2380
2381 static void
2382 record_set_data (rtx dest, const_rtx set, void *data)
2383 {
2384 struct set_data *s = (struct set_data *)data;
2385
2386 if (GET_CODE (set) == SET)
2387 {
2388       /* We treat insns with multiple sets, where all but one are
2389          dead, as single-set insns.  In the common case only a single
2390 set is present, so we want to avoid checking for REG_UNUSED
2391 notes unless necessary. */
2392 if (s->nsets == 1
2393 && find_reg_note (s->insn, REG_UNUSED, SET_DEST (s->set))
2394 && !side_effects_p (s->set))
2395 s->nsets = 0;
2396
2397 if (!s->nsets)
2398 {
2399 /* Record this set. */
2400 s->nsets += 1;
2401 s->set = set;
2402 }
2403 else if (!find_reg_note (s->insn, REG_UNUSED, dest)
2404 || side_effects_p (set))
2405 s->nsets += 1;
2406 }
2407 }
2408
2409 static const_rtx
2410 single_set_gcse (rtx_insn *insn)
2411 {
2412 struct set_data s;
2413 rtx pattern;
2414
2415 gcc_assert (INSN_P (insn));
2416
2417 /* Optimize common case. */
2418 pattern = PATTERN (insn);
2419 if (GET_CODE (pattern) == SET)
2420 return pattern;
2421
2422 s.insn = insn;
2423 s.nsets = 0;
2424 note_pattern_stores (pattern, record_set_data, &s);
2425
2426 /* Considered invariant insns have exactly one set. */
2427 gcc_assert (s.nsets == 1);
2428 return s.set;
2429 }
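/* As an illustration (the RTL below is invented), for a PARALLEL such as

     (parallel [(set (reg 100) (plus (reg 101) (reg 102)))
                (clobber (reg:CC 17))])

   record_set_data ignores the CLOBBER, so single_set_gcse returns the
   lone SET even though PATTERN (insn) is not itself a SET.  */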
2430
2431 /* Emit move from SRC to DEST noting the equivalence with expression computed
2432 in INSN. */
2433
2434 static rtx_insn *
2435 gcse_emit_move_after (rtx dest, rtx src, rtx_insn *insn)
2436 {
2437 rtx_insn *new_rtx;
2438 const_rtx set = single_set_gcse (insn);
2439 rtx set2;
2440 rtx note;
2441 rtx eqv = NULL_RTX;
2442
2443 /* This should never fail since we're creating a reg->reg copy
2444 we've verified to be valid. */
2445
2446 new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
2447
2448 /* Note the equivalence for local CSE pass. Take the note from the old
2449 set if there was one. Otherwise record the SET_SRC from the old set
2450 unless DEST is also an operand of the SET_SRC. */
2451 set2 = single_set (new_rtx);
2452 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
2453 return new_rtx;
2454 if ((note = find_reg_equal_equiv_note (insn)))
2455 eqv = XEXP (note, 0);
2456 else if (! REG_P (dest)
2457 || ! reg_mentioned_p (dest, SET_SRC (set)))
2458 eqv = SET_SRC (set);
2459
2460 if (eqv != NULL_RTX)
2461 set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
2462
2463 return new_rtx;
2464 }
2465
2466 /* Delete redundant computations.
2467 Deletion is done by changing the insn to copy the `reaching_reg' of
2468 the expression into the result of the SET. It is left to later passes
2469 to propagate the copy or eliminate it.
2470
2471 Return nonzero if a change is made. */
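/* Sketch of the transformation, with invented register numbers: a
   redundant "(set (reg 100) (plus (reg 101) (reg 102)))" is replaced by
   the copy "(set (reg 100) (reg 103))", where reg 103 is the
   expression's reaching_reg, and a REG_EQUAL note records the original
   expression for later passes.  */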
2472
2473 static int
2474 pre_delete (void)
2475 {
2476 unsigned int i;
2477 int changed;
2478 struct gcse_expr *expr;
2479 struct gcse_occr *occr;
2480
2481 changed = 0;
2482 for (i = 0; i < expr_hash_table.size; i++)
2483 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2484 {
2485 int indx = expr->bitmap_index;
2486
2487 /* We only need to search antic_occr since we require ANTLOC != 0. */
2488 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2489 {
2490 rtx_insn *insn = occr->insn;
2491 rtx set;
2492 basic_block bb = BLOCK_FOR_INSN (insn);
2493
2494 /* We only delete insns that have a single_set. */
2495 if (bitmap_bit_p (pre_delete_map[bb->index], indx)
2496 && (set = single_set (insn)) != 0
2497 && dbg_cnt (pre_insn))
2498 {
2499 /* Create a pseudo-reg to store the result of reaching
2500 expressions into. Get the mode for the new pseudo from
2501 the mode of the original destination pseudo. */
2502 if (expr->reaching_reg == NULL)
2503 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
2504
2505 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg, insn);
2506 delete_insn (insn);
2507 occr->deleted_p = 1;
2508 changed = 1;
2509 gcse_subst_count++;
2510
2511 if (dump_file)
2512 {
2513 fprintf (dump_file,
2514 "PRE: redundant insn %d (expression %d) in ",
2515 INSN_UID (insn), indx);
2516 fprintf (dump_file, "bb %d, reaching reg is %d\n",
2517 bb->index, REGNO (expr->reaching_reg));
2518 }
2519 }
2520 }
2521 }
2522
2523 return changed;
2524 }
2525
2526 /* Perform GCSE optimizations using PRE.
2527 This is called by one_pre_gcse_pass after all the dataflow analysis
2528 has been done.
2529
2530    This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
2531 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
2532 Compiler Design and Implementation.
2533
2534 ??? A new pseudo reg is created to hold the reaching expression. The nice
2535 thing about the classical approach is that it would try to use an existing
2536 reg. If the register can't be adequately optimized [i.e. we introduce
2537 reload problems], one could add a pass here to propagate the new register
2538 through the block.
2539
2540 ??? We don't handle single sets in PARALLELs because we're [currently] not
2541 able to copy the rest of the parallel when we insert copies to create full
2542 redundancies from partial redundancies. However, there's no reason why we
2543 can't handle PARALLELs in the cases where there are no partial
2544 redundancies. */
2545
2546 static int
2547 pre_gcse (struct edge_list *edge_list)
2548 {
2549 unsigned int i;
2550 int did_insert, changed;
2551 struct gcse_expr **index_map;
2552 struct gcse_expr *expr;
2553
2554 /* Compute a mapping from expression number (`bitmap_index') to
2555 hash table entry. */
2556
2557 index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
2558 for (i = 0; i < expr_hash_table.size; i++)
2559 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2560 index_map[expr->bitmap_index] = expr;
2561
2562 /* Delete the redundant insns first so that
2563 - we know what register to use for the new insns and for the other
2564 ones with reaching expressions
2565 - we know which insns are redundant when we go to create copies */
2566
2567 changed = pre_delete ();
2568 did_insert = pre_edge_insert (edge_list, index_map);
2569
2570 /* In other places with reaching expressions, copy the expression to the
2571 specially allocated pseudo-reg that reaches the redundant expr. */
2572 pre_insert_copies ();
2573 if (did_insert)
2574 {
2575 commit_edge_insertions ();
2576 changed = 1;
2577 }
2578
2579 free (index_map);
2580 return changed;
2581 }
2582
2583 /* Top level routine to perform one PRE GCSE pass.
2584
2585 Return nonzero if a change was made. */
2586
2587 static int
2588 one_pre_gcse_pass (void)
2589 {
2590 int changed = 0;
2591
2592 gcse_subst_count = 0;
2593 gcse_create_count = 0;
2594
2595 /* Return if there's nothing to do, or it is too expensive. */
2596 if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
2597 || gcse_or_cprop_is_too_expensive (_("PRE disabled")))
2598 return 0;
2599
2600 /* We need alias. */
2601 init_alias_analysis ();
2602
2603 bytes_used = 0;
2604 gcc_obstack_init (&gcse_obstack);
2605 alloc_gcse_mem ();
2606
2607 alloc_hash_table (&expr_hash_table);
2608 add_noreturn_fake_exit_edges ();
2609 if (flag_gcse_lm)
2610 compute_ld_motion_mems ();
2611
2612 compute_hash_table (&expr_hash_table);
2613 if (flag_gcse_lm)
2614 trim_ld_motion_mems ();
2615 if (dump_file)
2616 dump_hash_table (dump_file, "Expression", &expr_hash_table);
2617
2618 if (expr_hash_table.n_elems > 0)
2619 {
2620 struct edge_list *edge_list;
2621 alloc_pre_mem (last_basic_block_for_fn (cfun), expr_hash_table.n_elems);
2622 edge_list = compute_pre_data ();
2623 changed |= pre_gcse (edge_list);
2624 free_edge_list (edge_list);
2625 free_pre_mem ();
2626 }
2627
2628 if (flag_gcse_lm)
2629 free_ld_motion_mems ();
2630 remove_fake_exit_edges ();
2631 free_hash_table (&expr_hash_table);
2632
2633 free_gcse_mem ();
2634 obstack_free (&gcse_obstack, NULL);
2635
2636 /* We are finished with alias. */
2637 end_alias_analysis ();
2638
2639 if (dump_file)
2640 {
2641 fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
2642 current_function_name (), n_basic_blocks_for_fn (cfun),
2643 bytes_used);
2644 fprintf (dump_file, "%d substs, %d insns created\n",
2645 gcse_subst_count, gcse_create_count);
2646 }
2647
2648 return changed;
2649 }
2650 \f
2651 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
2652 to INSN. If such notes are added to an insn which references a
2653 CODE_LABEL, the LABEL_NUSES count is incremented. We have to add
2654 that note, because the following loop optimization pass requires
2655 them. */
2656
2657 /* ??? If there was a jump optimization pass after gcse and before loop,
2658 then we would not need to do this here, because jump would add the
2659 necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes. */
2660
2661 static void
2662 add_label_notes (rtx x, rtx_insn *insn)
2663 {
2664 enum rtx_code code = GET_CODE (x);
2665 int i, j;
2666 const char *fmt;
2667
2668 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2669 {
2670 /* This code used to ignore labels that referred to dispatch tables to
2671 avoid flow generating (slightly) worse code.
2672
2673 We no longer ignore such label references (see LABEL_REF handling in
2674 mark_jump_label for additional information). */
2675
2676 /* There's no reason for current users to emit jump-insns with
2677 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
2678 notes. */
2679 gcc_assert (!JUMP_P (insn));
2680 add_reg_note (insn, REG_LABEL_OPERAND, label_ref_label (x));
2681
2682 if (LABEL_P (label_ref_label (x)))
2683 LABEL_NUSES (label_ref_label (x))++;
2684
2685 return;
2686 }
2687
2688 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2689 {
2690 if (fmt[i] == 'e')
2691 add_label_notes (XEXP (x, i), insn);
2692 else if (fmt[i] == 'E')
2693 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2694 add_label_notes (XVECEXP (x, i, j), insn);
2695 }
2696 }
2697
2698 /* Code Hoisting variables and subroutines. */
2699
2700 /* Very busy expressions. */
2701 static sbitmap *hoist_vbein;
2702 static sbitmap *hoist_vbeout;
2703
2704 /* ??? We could compute post dominators and run this algorithm in
2705    reverse to perform tail merging; doing so would probably be
2706 more effective than the tail merging code in jump.c.
2707
2708 It's unclear if tail merging could be run in parallel with
2709 code hoisting. It would be nice. */
2710
2711 /* Allocate vars used for code hoisting analysis. */
2712
2713 static void
2714 alloc_code_hoist_mem (int n_blocks, int n_exprs)
2715 {
2716 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
2717 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
2718 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
2719
2720 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
2721 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
2722 }
2723
2724 /* Free vars used for code hoisting analysis. */
2725
2726 static void
2727 free_code_hoist_mem (void)
2728 {
2729 sbitmap_vector_free (antloc);
2730 sbitmap_vector_free (transp);
2731 sbitmap_vector_free (comp);
2732
2733 sbitmap_vector_free (hoist_vbein);
2734 sbitmap_vector_free (hoist_vbeout);
2735
2736 free_dominance_info (CDI_DOMINATORS);
2737 }
2738
2739 /* Compute the very busy expressions at entry/exit from each block.
2740
2741 An expression is very busy if all paths from a given point
2742 compute the expression. */
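/* Restated as dataflow equations (for exposition; this mirrors the
   bitmap operations in the loop below, iterated to a fixed point):

     VBEOUT(bb) = COMP(bb) | (intersection over successors S of VBEIN(S))
     VBEIN(bb)  = ANTLOC(bb) | (VBEOUT(bb) & TRANSP(bb))  */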
2743
2744 static void
2745 compute_code_hoist_vbeinout (void)
2746 {
2747 int changed, passes;
2748 basic_block bb;
2749
2750 bitmap_vector_clear (hoist_vbeout, last_basic_block_for_fn (cfun));
2751 bitmap_vector_clear (hoist_vbein, last_basic_block_for_fn (cfun));
2752
2753 passes = 0;
2754 changed = 1;
2755
2756 while (changed)
2757 {
2758 changed = 0;
2759
2760       /* We scan the blocks in reverse order to speed up
2761          the convergence.  */
2762 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2763 {
2764 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
2765 {
2766 bitmap_intersection_of_succs (hoist_vbeout[bb->index],
2767 hoist_vbein, bb);
2768
2769 /* Include expressions in VBEout that are calculated
2770 in BB and available at its end. */
2771 bitmap_ior (hoist_vbeout[bb->index],
2772 hoist_vbeout[bb->index], comp[bb->index]);
2773 }
2774
2775 changed |= bitmap_or_and (hoist_vbein[bb->index],
2776 antloc[bb->index],
2777 hoist_vbeout[bb->index],
2778 transp[bb->index]);
2779 }
2780
2781 passes++;
2782 }
2783
2784 if (dump_file)
2785 {
2786 fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
2787
2788 FOR_EACH_BB_FN (bb, cfun)
2789 {
2790 fprintf (dump_file, "vbein (%d): ", bb->index);
2791 dump_bitmap_file (dump_file, hoist_vbein[bb->index]);
2792 fprintf (dump_file, "vbeout(%d): ", bb->index);
2793 dump_bitmap_file (dump_file, hoist_vbeout[bb->index]);
2794 }
2795 }
2796 }
2797
2798 /* Top level routine to do the dataflow analysis needed by code hoisting. */
2799
2800 static void
2801 compute_code_hoist_data (void)
2802 {
2803 compute_local_properties (transp, comp, antloc, &expr_hash_table);
2804 prune_expressions (false);
2805 compute_code_hoist_vbeinout ();
2806 calculate_dominance_info (CDI_DOMINATORS);
2807 if (dump_file)
2808 fprintf (dump_file, "\n");
2809 }
2810
2811 /* Update register pressure for BB when hoisting an expression from
2812    instruction FROM, if live ranges of inputs are shrunk.  Also
2813    maintain live_in information if the live range of a register
2814    referred to in FROM is shrunk.
2815
2816 Return 0 if register pressure doesn't change, otherwise return
2817 the number by which register pressure is decreased.
2818
2819 NOTE: Register pressure won't be increased in this function. */
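/* Illustrative scenario (register numbers invented): if FROM is
   "r100 <- r101 + r102" and neither r101 nor r102 is live into any
   successor of BB or referenced by another insn in BB, hoisting FROM
   out of BB ends their live ranges here, so BB's maximum pressure for
   their class drops by the corresponding register counts.  */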
2820
2821 static int
2822 update_bb_reg_pressure (basic_block bb, rtx_insn *from)
2823 {
2824 rtx dreg;
2825 rtx_insn *insn;
2826 basic_block succ_bb;
2827 df_ref use, op_ref;
2828 edge succ;
2829 edge_iterator ei;
2830 int decreased_pressure = 0;
2831 int nregs;
2832 enum reg_class pressure_class;
2833
2834 FOR_EACH_INSN_USE (use, from)
2835 {
2836 dreg = DF_REF_REAL_REG (use);
2837       /* The live range of the register is shrunk only if it isn't:
2838          1. referred to on any path from the end of this block to EXIT, or
2839          2. referred to by insns other than FROM in this block.  */
2840 FOR_EACH_EDGE (succ, ei, bb->succs)
2841 {
2842 succ_bb = succ->dest;
2843 if (succ_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2844 continue;
2845
2846 if (bitmap_bit_p (BB_DATA (succ_bb)->live_in, REGNO (dreg)))
2847 break;
2848 }
2849 if (succ != NULL)
2850 continue;
2851
2852 op_ref = DF_REG_USE_CHAIN (REGNO (dreg));
2853 for (; op_ref; op_ref = DF_REF_NEXT_REG (op_ref))
2854 {
2855 if (!DF_REF_INSN_INFO (op_ref))
2856 continue;
2857
2858 insn = DF_REF_INSN (op_ref);
2859 if (BLOCK_FOR_INSN (insn) == bb
2860 && NONDEBUG_INSN_P (insn) && insn != from)
2861 break;
2862 }
2863
2864 pressure_class = get_regno_pressure_class (REGNO (dreg), &nregs);
2865 /* Decrease register pressure and update live_in information for
2866 this block. */
2867 if (!op_ref && pressure_class != NO_REGS)
2868 {
2869 decreased_pressure += nregs;
2870 BB_DATA (bb)->max_reg_pressure[pressure_class] -= nregs;
2871 bitmap_clear_bit (BB_DATA (bb)->live_in, REGNO (dreg));
2872 }
2873 }
2874 return decreased_pressure;
2875 }
2876
2877 /* Determine if the expression EXPR should be hoisted up the flow graph
2878    to EXPR_BB, if it can reach BB unimpaired.  Stop the search if the
2879    expression would need to be moved more than DISTANCE instructions.
2880
2881 DISTANCE is the number of instructions through which EXPR can be
2882 hoisted up in flow graph.
2883
2884 BB_SIZE points to an array which contains the number of instructions
2885 for each basic block.
2886
2887 PRESSURE_CLASS and NREGS are register class and number of hard registers
2888 for storing EXPR.
2889
2890 HOISTED_BBS points to a bitmap indicating basic blocks through which
2891 EXPR is hoisted.
2892
2893 FROM is the instruction from which EXPR is hoisted.
2894
2895    It's unclear exactly what Muchnick meant by "unimpaired".  It seems
2896 to me that the expression must either be computed or transparent in
2897 *every* block in the path(s) from EXPR_BB to BB. Any other definition
2898 would allow the expression to be hoisted out of loops, even if
2899 the expression wasn't a loop invariant.
2900
2901 Contrast this to reachability for PRE where an expression is
2902    considered reachable if *any* path reaches it, instead of requiring
2903    *all* paths.  */
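/* As an illustration: if there are two paths from EXPR_BB down to BB
   and EXPR is not transparent in some block on one of them, the walk
   below fails, even though PRE's any-path notion of reachability would
   have been satisfied by the other path.  */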
2904
2905 static int
2906 should_hoist_expr_to_dom (basic_block expr_bb, struct gcse_expr *expr,
2907 basic_block bb, sbitmap visited,
2908 HOST_WIDE_INT distance,
2909 int *bb_size, enum reg_class pressure_class,
2910 int *nregs, bitmap hoisted_bbs, rtx_insn *from)
2911 {
2912 unsigned int i;
2913 edge pred;
2914 edge_iterator ei;
2915 sbitmap_iterator sbi;
2916 int visited_allocated_locally = 0;
2917 int decreased_pressure = 0;
2918
2919 if (flag_ira_hoist_pressure)
2920 {
2921       /* Record the old information of basic block BB when it is visited
2922          for the first time.  */
2923 if (!bitmap_bit_p (hoisted_bbs, bb->index))
2924 {
2925 struct bb_data *data = BB_DATA (bb);
2926 bitmap_copy (data->backup, data->live_in);
2927 data->old_pressure = data->max_reg_pressure[pressure_class];
2928 }
2929 decreased_pressure = update_bb_reg_pressure (bb, from);
2930 }
2931   /* Terminate the search if the distance through which EXPR is allowed
2932      to move is exhausted.  */
2933 if (distance > 0)
2934 {
2935 if (flag_ira_hoist_pressure)
2936 {
2937 /* Prefer to hoist EXPR if register pressure is decreased. */
2938 if (decreased_pressure > *nregs)
2939 distance += bb_size[bb->index];
2940       /* Let EXPR be hoisted through the basic block at no cost if one
2941          of the following conditions is satisfied:

2943          1. The basic block has low register pressure.
2944          2. Register pressure won't be increased after hoisting EXPR.

2946          Constant expressions are handled conservatively, because
2947          hoisting constant expressions aggressively results in worse
2948          code.  This decision is based on observations of CSiBE on the
2949          ARM target; it has no obvious effect on other targets such as
2950          x86, x86_64, mips and powerpc.  */
2951 else if (CONST_INT_P (expr->expr)
2952 || (BB_DATA (bb)->max_reg_pressure[pressure_class]
2953 >= ira_class_hard_regs_num[pressure_class]
2954 && decreased_pressure < *nregs))
2955 distance -= bb_size[bb->index];
2956 }
2957 else
2958 distance -= bb_size[bb->index];
2959
2960 if (distance <= 0)
2961 return 0;
2962 }
2963 else
2964 gcc_assert (distance == 0);
2965
2966 if (visited == NULL)
2967 {
2968 visited_allocated_locally = 1;
2969 visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
2970 bitmap_clear (visited);
2971 }
2972
2973 FOR_EACH_EDGE (pred, ei, bb->preds)
2974 {
2975 basic_block pred_bb = pred->src;
2976
2977 if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2978 break;
2979 else if (pred_bb == expr_bb)
2980 continue;
2981 else if (bitmap_bit_p (visited, pred_bb->index))
2982 continue;
2983 else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
2984 break;
2985 /* Not killed. */
2986 else
2987 {
2988 bitmap_set_bit (visited, pred_bb->index);
2989 if (! should_hoist_expr_to_dom (expr_bb, expr, pred_bb,
2990 visited, distance, bb_size,
2991 pressure_class, nregs,
2992 hoisted_bbs, from))
2993 break;
2994 }
2995 }
2996 if (visited_allocated_locally)
2997 {
2998 /* If EXPR can be hoisted to expr_bb, record basic blocks through
2999 which EXPR is hoisted in hoisted_bbs. */
3000 if (flag_ira_hoist_pressure && !pred)
3001 {
3002 /* Record the basic block from which EXPR is hoisted. */
3003 bitmap_set_bit (visited, bb->index);
3004 EXECUTE_IF_SET_IN_BITMAP (visited, 0, i, sbi)
3005 bitmap_set_bit (hoisted_bbs, i);
3006 }
3007 sbitmap_free (visited);
3008 }
3009
3010 return (pred == NULL);
3011 }
3012 \f
3013 /* Find occurrence in BB. */
3014
3015 static struct gcse_occr *
3016 find_occr_in_bb (struct gcse_occr *occr, basic_block bb)
3017 {
3018 /* Find the right occurrence of this expression. */
3019 while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
3020 occr = occr->next;
3021
3022 return occr;
3023 }
3024
3025 /* Actually perform code hoisting.
3026
3027 The code hoisting pass can hoist multiple computations of the same
3028    expression along dominated paths to a dominating basic block, like
3029 from b2/b3 to b1 as depicted below:
3030
3031 b1 ------
3032 /\ |
3033 / \ |
3034 bx by distance
3035 / \ |
3036 / \ |
3037 b2 b3 ------
3038
3039 Unfortunately code hoisting generally extends the live range of an
3040 output pseudo register, which increases register pressure and hurts
3041 register allocation. To address this issue, an attribute MAX_DISTANCE
3042 is computed and attached to each expression. The attribute is computed
3043    from the rtx cost of the corresponding expression and is used to control
3044    how far the expression can be hoisted up the flow graph.  As the
3045    expression is hoisted up the flow graph, GCC decreases its DISTANCE
3046 and stops the hoist if DISTANCE reaches 0. Code hoisting can decrease
3047 register pressure if live ranges of inputs are shrunk.
3048
3049    Option "-fira-hoist-pressure" implements register-pressure-directed
3050    hoisting based on the method above.  The rationale is:
3051 1. Calculate register pressure for each basic block by reusing IRA
3052 facility.
3053 2. When expression is hoisted through one basic block, GCC checks
3054 the change of live ranges for inputs/output. The basic block's
3055 register pressure will be increased because of extended live
3056 range of output. However, register pressure will be decreased
3057 if the live ranges of inputs are shrunk.
3058 3. After knowing how hoisting affects register pressure, GCC prefers
3059 to hoist the expression if it can decrease register pressure, by
3060 increasing DISTANCE of the corresponding expression.
3061 4. If hoisting the expression increases register pressure, GCC checks
3062       register pressure of the basic block and decreases DISTANCE only if
3063       the register pressure is high.  In other words, the expression will
3064       be hoisted through the block at no cost if the basic block has low
3065       register pressure.
3066 5. Update register pressure information for basic blocks through
3067 which expression is hoisted. */
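/* A source-level sketch of the transformation (identifiers invented):

     if (c) x = a + b; else y = a + b;

   becomes

     t = a + b;
     if (c) x = t; else y = t;

   "a + b" is very busy at the end of the dominating block, so both
   dominated occurrences become copies from a new pseudo computed once
   at the end of the dominator.  */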
3068
3069 static int
3070 hoist_code (void)
3071 {
3072 basic_block bb, dominated;
3073 vec<basic_block> dom_tree_walk;
3074 unsigned int dom_tree_walk_index;
3075 vec<basic_block> domby;
3076 unsigned int i, j, k;
3077 struct gcse_expr **index_map;
3078 struct gcse_expr *expr;
3079 int *to_bb_head;
3080 int *bb_size;
3081 int changed = 0;
3082 struct bb_data *data;
3083 /* Basic blocks that have occurrences reachable from BB. */
3084 bitmap from_bbs;
3085 /* Basic blocks through which expr is hoisted. */
3086 bitmap hoisted_bbs = NULL;
3087 bitmap_iterator bi;
3088
3089 /* Compute a mapping from expression number (`bitmap_index') to
3090 hash table entry. */
3091
3092 index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
3093 for (i = 0; i < expr_hash_table.size; i++)
3094 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
3095 index_map[expr->bitmap_index] = expr;
3096
3097 /* Calculate sizes of basic blocks and note how far
3098 each instruction is from the start of its block. We then use this
3099      data to restrict the distance an expression can travel.  */
3100
3101 to_bb_head = XCNEWVEC (int, get_max_uid ());
3102 bb_size = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3103
3104 FOR_EACH_BB_FN (bb, cfun)
3105 {
3106 rtx_insn *insn;
3107 int to_head;
3108
3109 to_head = 0;
3110 FOR_BB_INSNS (bb, insn)
3111 {
3112           /* Don't count debug instructions, so they don't affect
3113              hoisting decisions.  */
3114 if (NONDEBUG_INSN_P (insn))
3115 to_bb_head[INSN_UID (insn)] = to_head++;
3116 }
3117
3118 bb_size[bb->index] = to_head;
3119 }
3120
3121 gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) == 1
3122 && (EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun), 0)->dest
3123 == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb));
3124
3125 from_bbs = BITMAP_ALLOC (NULL);
3126 if (flag_ira_hoist_pressure)
3127 hoisted_bbs = BITMAP_ALLOC (NULL);
3128
3129 dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
3130 ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb);
3131
3132 /* Walk over each basic block looking for potentially hoistable
3133      expressions; nothing gets hoisted from the entry block.  */
3134 FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
3135 {
3136 domby = get_dominated_to_depth (CDI_DOMINATORS, bb,
3137 param_max_hoist_depth);
3138
3139 if (domby.length () == 0)
3140 continue;
3141
3142 /* Examine each expression that is very busy at the exit of this
3143 block. These are the potentially hoistable expressions. */
3144 for (i = 0; i < SBITMAP_SIZE (hoist_vbeout[bb->index]); i++)
3145 {
3146 if (bitmap_bit_p (hoist_vbeout[bb->index], i))
3147 {
3148 int nregs = 0;
3149 enum reg_class pressure_class = NO_REGS;
3150 /* Current expression. */
3151 struct gcse_expr *expr = index_map[i];
3152 /* Number of occurrences of EXPR that can be hoisted to BB. */
3153 int hoistable = 0;
3154 /* Occurrences reachable from BB. */
3155 vec<occr_t> occrs_to_hoist = vNULL;
3156 /* We want to insert the expression into BB only once, so
3157 note when we've inserted it. */
3158 int insn_inserted_p;
3159 occr_t occr;
3160
3161 /* If an expression is computed in BB and is available at end of
3162 BB, hoist all occurrences dominated by BB to BB. */
3163 if (bitmap_bit_p (comp[bb->index], i))
3164 {
3165 occr = find_occr_in_bb (expr->antic_occr, bb);
3166
3167 if (occr)
3168 {
3169 /* An occurrence might've been already deleted
3170 while processing a dominator of BB. */
3171 if (!occr->deleted_p)
3172 {
3173 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3174 hoistable++;
3175 }
3176 }
3177 else
3178 hoistable++;
3179 }
3180
3181 /* We've found a potentially hoistable expression, now
3182 we look at every block BB dominates to see if it
3183 computes the expression. */
3184 FOR_EACH_VEC_ELT (domby, j, dominated)
3185 {
3186 HOST_WIDE_INT max_distance;
3187
3188 /* Ignore self dominance. */
3189 if (bb == dominated)
3190 continue;
3191 /* We've found a dominated block, now see if it computes
3192 the busy expression and whether or not moving that
3193 expression to the "beginning" of that block is safe. */
3194 if (!bitmap_bit_p (antloc[dominated->index], i))
3195 continue;
3196
3197 occr = find_occr_in_bb (expr->antic_occr, dominated);
3198 gcc_assert (occr);
3199
3200 /* An occurrence might've been already deleted
3201 while processing a dominator of BB. */
3202 if (occr->deleted_p)
3203 continue;
3204 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3205
3206 max_distance = expr->max_distance;
3207 if (max_distance > 0)
3208 /* Adjust MAX_DISTANCE to account for the fact that
3209 OCCR won't have to travel all of DOMINATED, but
3210 only part of it. */
3211 max_distance += (bb_size[dominated->index]
3212 - to_bb_head[INSN_UID (occr->insn)]);
3213
3214 pressure_class = get_pressure_class_and_nregs (occr->insn,
3215 &nregs);
3216
3217 /* Note if the expression should be hoisted from the dominated
3218                  block to BB if it can reach DOMINATED unimpaired.
3219
3220 Keep track of how many times this expression is hoistable
3221 from a dominated block into BB. */
3222 if (should_hoist_expr_to_dom (bb, expr, dominated, NULL,
3223 max_distance, bb_size,
3224 pressure_class, &nregs,
3225 hoisted_bbs, occr->insn))
3226 {
3227 hoistable++;
3228 occrs_to_hoist.safe_push (occr);
3229 bitmap_set_bit (from_bbs, dominated->index);
3230 }
3231 }
3232
3233 /* If we found more than one hoistable occurrence of this
3234 expression, then note it in the vector of expressions to
3235 hoist. It makes no sense to hoist things which are computed
3236 in only one BB, and doing so tends to pessimize register
3237 allocation. One could increase this value to try harder
3238 to avoid any possible code expansion due to register
3239 allocation issues; however experiments have shown that
3240 the vast majority of hoistable expressions are only movable
3241 from two successors, so raising this threshold is likely
3242 to nullify any benefit we get from code hoisting. */
3243 if (hoistable > 1 && dbg_cnt (hoist_insn))
3244 {
3245 /* If (hoistable != vec::length), then there is
3246 an occurrence of EXPR in BB itself. Don't waste
3247 time looking for LCA in this case. */
3248 if ((unsigned) hoistable == occrs_to_hoist.length ())
3249 {
3250 basic_block lca;
3251
3252 lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
3253 from_bbs);
3254 if (lca != bb)
3255 /* Punt, it's better to hoist these occurrences to
3256 LCA. */
3257 occrs_to_hoist.release ();
3258 }
3259 }
3260 else
3261 /* Punt, no point hoisting a single occurrence. */
3262 occrs_to_hoist.release ();
3263
3264 if (flag_ira_hoist_pressure
3265 && !occrs_to_hoist.is_empty ())
3266 {
3267 /* Increase register pressure of basic blocks to which
3268 expr is hoisted because of extended live range of
3269 output. */
3270 data = BB_DATA (bb);
3271 data->max_reg_pressure[pressure_class] += nregs;
3272 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3273 {
3274 data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
3275 data->max_reg_pressure[pressure_class] += nregs;
3276 }
3277 }
3278 else if (flag_ira_hoist_pressure)
3279 {
3280 /* Restore register pressure and live_in info for basic
3281 blocks recorded in hoisted_bbs when expr will not be
3282 hoisted. */
3283 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3284 {
3285 data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
3286 bitmap_copy (data->live_in, data->backup);
3287 data->max_reg_pressure[pressure_class]
3288 = data->old_pressure;
3289 }
3290 }
3291
3292 if (flag_ira_hoist_pressure)
3293 bitmap_clear (hoisted_bbs);
3294
3295 insn_inserted_p = 0;
3296
3297           /* Walk through occurrences of the I'th expression we want
3298 to hoist to BB and make the transformations. */
3299 FOR_EACH_VEC_ELT (occrs_to_hoist, j, occr)
3300 {
3301 rtx_insn *insn;
3302 const_rtx set;
3303
3304 gcc_assert (!occr->deleted_p);
3305
3306 insn = occr->insn;
3307 set = single_set_gcse (insn);
3308
3309 /* Create a pseudo-reg to store the result of reaching
3310 expressions into. Get the mode for the new pseudo
3311 from the mode of the original destination pseudo.
3312
3313 It is important to use new pseudos whenever we
3314 emit a set. This will allow reload to use
3315 rematerialization for such registers. */
3316 if (!insn_inserted_p)
3317 expr->reaching_reg
3318 = gen_reg_rtx_and_attrs (SET_DEST (set));
3319
3320 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg,
3321 insn);
3322 delete_insn (insn);
3323 occr->deleted_p = 1;
3324 changed = 1;
3325 gcse_subst_count++;
3326
3327 if (!insn_inserted_p)
3328 {
3329 insert_insn_end_basic_block (expr, bb);
3330 insn_inserted_p = 1;
3331 }
3332 }
3333
3334 occrs_to_hoist.release ();
3335 bitmap_clear (from_bbs);
3336 }
3337 }
3338 domby.release ();
3339 }
3340
3341 dom_tree_walk.release ();
3342 BITMAP_FREE (from_bbs);
3343 if (flag_ira_hoist_pressure)
3344 BITMAP_FREE (hoisted_bbs);
3345
3346 free (bb_size);
3347 free (to_bb_head);
3348 free (index_map);
3349
3350 return changed;
3351 }
3352
3353 /* Return pressure class and number of needed hard registers (through
3354 *NREGS) of register REGNO. */
3355 static enum reg_class
3356 get_regno_pressure_class (int regno, int *nregs)
3357 {
3358 if (regno >= FIRST_PSEUDO_REGISTER)
3359 {
3360 enum reg_class pressure_class;
3361
3362 pressure_class = reg_allocno_class (regno);
3363 pressure_class = ira_pressure_class_translate[pressure_class];
3364 *nregs
3365 = ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
3366 return pressure_class;
3367 }
3368 else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
3369 && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
3370 {
3371 *nregs = 1;
3372 return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
3373 }
3374 else
3375 {
3376 *nregs = 0;
3377 return NO_REGS;
3378 }
3379 }
3380
3381 /* Return pressure class and number of hard registers (through *NREGS)
3382 for destination of INSN. */
3383 static enum reg_class
3384 get_pressure_class_and_nregs (rtx_insn *insn, int *nregs)
3385 {
3386 rtx reg;
3387 enum reg_class pressure_class;
3388 const_rtx set = single_set_gcse (insn);
3389
3390 reg = SET_DEST (set);
3391 if (GET_CODE (reg) == SUBREG)
3392 reg = SUBREG_REG (reg);
3393 if (MEM_P (reg))
3394 {
3395 *nregs = 0;
3396 pressure_class = NO_REGS;
3397 }
3398 else
3399 {
3400 gcc_assert (REG_P (reg));
3401 pressure_class = reg_allocno_class (REGNO (reg));
3402 pressure_class = ira_pressure_class_translate[pressure_class];
3403 *nregs
3404 = ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
3405 }
3406 return pressure_class;
3407 }
3408
3409 /* Increase (if INCR_P) or decrease current register pressure for
3410 register REGNO. */
3411 static void
3412 change_pressure (int regno, bool incr_p)
3413 {
3414 int nregs;
3415 enum reg_class pressure_class;
3416
3417 pressure_class = get_regno_pressure_class (regno, &nregs);
3418 if (! incr_p)
3419 curr_reg_pressure[pressure_class] -= nregs;
3420 else
3421 {
3422 curr_reg_pressure[pressure_class] += nregs;
3423 if (BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3424 < curr_reg_pressure[pressure_class])
3425 BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3426 = curr_reg_pressure[pressure_class];
3427 }
3428 }
3429
3430 /* Calculate register pressure for each basic block by walking insns
3431 from last to first. */
3432 static void
3433 calculate_bb_reg_pressure (void)
3434 {
3435 int i;
3436 unsigned int j;
3437 rtx_insn *insn;
3438 basic_block bb;
3439 bitmap curr_regs_live;
3440 bitmap_iterator bi;
3441
3442
3443 ira_setup_eliminable_regset ();
3444 curr_regs_live = BITMAP_ALLOC (&reg_obstack);
3445 FOR_EACH_BB_FN (bb, cfun)
3446 {
3447 curr_bb = bb;
3448 BB_DATA (bb)->live_in = BITMAP_ALLOC (NULL);
3449 BB_DATA (bb)->backup = BITMAP_ALLOC (NULL);
3450 bitmap_copy (BB_DATA (bb)->live_in, df_get_live_in (bb));
3451 bitmap_copy (curr_regs_live, df_get_live_out (bb));
3452 for (i = 0; i < ira_pressure_classes_num; i++)
3453 curr_reg_pressure[ira_pressure_classes[i]] = 0;
3454 EXECUTE_IF_SET_IN_BITMAP (curr_regs_live, 0, j, bi)
3455 change_pressure (j, true);
3456
3457 FOR_BB_INSNS_REVERSE (bb, insn)
3458 {
3459 rtx dreg;
3460 int regno;
3461 df_ref def, use;
3462
3463 if (! NONDEBUG_INSN_P (insn))
3464 continue;
3465
3466 FOR_EACH_INSN_DEF (def, insn)
3467 {
3468 dreg = DF_REF_REAL_REG (def);
3469 gcc_assert (REG_P (dreg));
3470 regno = REGNO (dreg);
3471 if (!(DF_REF_FLAGS (def)
3472 & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
3473 {
3474 if (bitmap_clear_bit (curr_regs_live, regno))
3475 change_pressure (regno, false);
3476 }
3477 }
3478
3479 FOR_EACH_INSN_USE (use, insn)
3480 {
3481 dreg = DF_REF_REAL_REG (use);
3482 gcc_assert (REG_P (dreg));
3483 regno = REGNO (dreg);
3484 if (bitmap_set_bit (curr_regs_live, regno))
3485 change_pressure (regno, true);
3486 }
3487 }
3488 }
3489 BITMAP_FREE (curr_regs_live);
3490
3491 if (dump_file == NULL)
3492 return;
3493
3494 fprintf (dump_file, "\nRegister Pressure: \n");
3495 FOR_EACH_BB_FN (bb, cfun)
3496 {
3497 fprintf (dump_file, " Basic block %d: \n", bb->index);
3498 for (i = 0; i < ira_pressure_classes_num; i++)
3499 {
3500 enum reg_class pressure_class;
3501
3502 pressure_class = ira_pressure_classes[i];
3503 if (BB_DATA (bb)->max_reg_pressure[pressure_class] == 0)
3504 continue;
3505
3506 fprintf (dump_file, " %s=%d\n", reg_class_names[pressure_class],
3507 BB_DATA (bb)->max_reg_pressure[pressure_class]);
3508 }
3509 }
3510 fprintf (dump_file, "\n");
3511 }
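
/* A sketch of the backward scan above, under the simplifying
   assumption of a single pressure class with one hard register per
   pseudo.  Given a block with live-out = {r4} and the insns

       (set (reg r3) (plus (reg r1) (reg r2)))
       (set (reg r4) (mult (reg r3) (reg r3)))

   we start from the live-out set, so pressure is 1 (r4).  Scanning the
   MULT first kills its def r4 (pressure 0) and then adds its use r3
   (pressure 1); scanning the PLUS kills r3 (pressure 0) and adds r1
   and r2 (pressure 2).  The max_reg_pressure recorded for the block is
   therefore 2.  */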
3512
3513 /* Top level routine to perform one code hoisting (aka unification) pass.
3514
3515 Return nonzero if a change was made. */
3516
3517 static int
3518 one_code_hoisting_pass (void)
3519 {
3520 int changed = 0;
3521
3522 gcse_subst_count = 0;
3523 gcse_create_count = 0;
3524
3525 /* Return if there's nothing to do, or it is too expensive. */
3526 if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
3527 || gcse_or_cprop_is_too_expensive (_("GCSE disabled")))
3528 return 0;
3529
3530 doing_code_hoisting_p = true;
3531
3532 /* Calculate register pressure for each basic block. */
3533 if (flag_ira_hoist_pressure)
3534 {
3535 regstat_init_n_sets_and_refs ();
3536 ira_set_pseudo_classes (false, dump_file);
3537 alloc_aux_for_blocks (sizeof (struct bb_data));
3538 calculate_bb_reg_pressure ();
3539 regstat_free_n_sets_and_refs ();
3540 }
3541
3542 /* We need alias analysis. */
3543 init_alias_analysis ();
3544
3545 bytes_used = 0;
3546 gcc_obstack_init (&gcse_obstack);
3547 alloc_gcse_mem ();
3548
3549 alloc_hash_table (&expr_hash_table);
3550 compute_hash_table (&expr_hash_table);
3551 if (dump_file)
3552 dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);
3553
3554 if (expr_hash_table.n_elems > 0)
3555 {
3556 alloc_code_hoist_mem (last_basic_block_for_fn (cfun),
3557 expr_hash_table.n_elems);
3558 compute_code_hoist_data ();
3559 changed = hoist_code ();
3560 free_code_hoist_mem ();
3561 }
3562
3563 if (flag_ira_hoist_pressure)
3564 {
3565 free_aux_for_blocks ();
3566 free_reg_info ();
3567 }
3568 free_hash_table (&expr_hash_table);
3569 free_gcse_mem ();
3570 obstack_free (&gcse_obstack, NULL);
3571
3572 /* We are finished with alias analysis. */
3573 end_alias_analysis ();
3574
3575 if (dump_file)
3576 {
3577 fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
3578 current_function_name (), n_basic_blocks_for_fn (cfun),
3579 bytes_used);
3580 fprintf (dump_file, "%d substs, %d insns created\n",
3581 gcse_subst_count, gcse_create_count);
3582 }
3583
3584 doing_code_hoisting_p = false;
3585
3586 return changed;
3587 }
3588 \f
3589 /* Here we provide the things required to do store motion towards the exit.
3590 In order for this to be effective, gcse also needed to be taught how to
3591 move a load when it is killed only by a store to itself.
3592
3593 int i;
3594 float a[10];
3595
3596 void foo(float scale)
3597 {
3598 for (i=0; i<10; i++)
3599 a[i] *= scale;
3600 }
3601
3602 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
3603 the load out since it's live around the loop, and stored at the bottom
3604 of the loop.
3605
3606 The 'Load Motion' referred to and implemented in this file is
3607 an enhancement to gcse which when using edge based LCM, recognizes
3608 this situation and allows gcse to move the load out of the loop.
3609
3610 Once gcse has hoisted the load, store motion can then push this
3611 load towards the exit, and we end up with no loads or stores of 'i'
3612 in the loop. */
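
/* In source terms, the combined effect on the example above is roughly
   the following (a sketch only; the actual transformation is performed
   on RTL, and 'tmp' stands for whatever pseudo register is chosen):

       void foo (float scale)
       {
	 int tmp;
	 for (tmp = 0; tmp < 10; tmp++)
	   a[tmp] *= scale;
	 i = tmp;
       }

   i.e. every load and store of 'i' inside the loop is replaced by a
   register, with a single store of the final value at the loop exit.  */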
3613
3614 /* Search the ldst list for an expression matching X. If one is not
3615 found, create a new entry and initialize it. */
3616
3617 static struct ls_expr *
3618 ldst_entry (rtx x)
3619 {
3620 int do_not_record_p = 0;
3621 struct ls_expr * ptr;
3622 unsigned int hash;
3623 ls_expr **slot;
3624 struct ls_expr e;
3625
3626 hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
3627 NULL, /*have_reg_qty=*/false);
3628
3629 e.pattern = x;
3630 slot = pre_ldst_table->find_slot_with_hash (&e, hash, INSERT);
3631 if (*slot)
3632 return *slot;
3633
3634 ptr = XNEW (struct ls_expr);
3635
3636 ptr->next = pre_ldst_mems;
3637 ptr->expr = NULL;
3638 ptr->pattern = x;
3639 ptr->pattern_regs = NULL_RTX;
3640 ptr->stores.create (0);
3641 ptr->reaching_reg = NULL_RTX;
3642 ptr->invalid = 0;
3643 ptr->index = 0;
3644 ptr->hash_index = hash;
3645 pre_ldst_mems = ptr;
3646 *slot = ptr;
3647
3648 return ptr;
3649 }
3650
3651 /* Free up an individual ldst entry. */
3652
3653 static void
3654 free_ldst_entry (struct ls_expr * ptr)
3655 {
3656 ptr->stores.release ();
3657
3658 free (ptr);
3659 }
3660
3661 /* Free up all memory associated with the ldst list. */
3662
3663 static void
3664 free_ld_motion_mems (void)
3665 {
3666 delete pre_ldst_table;
3667 pre_ldst_table = NULL;
3668
3669 while (pre_ldst_mems)
3670 {
3671 struct ls_expr * tmp = pre_ldst_mems;
3672
3673 pre_ldst_mems = pre_ldst_mems->next;
3674
3675 free_ldst_entry (tmp);
3676 }
3677
3678 pre_ldst_mems = NULL;
3679 }
3680
3681 /* Dump debugging info about the ldst list. */
3682
3683 static void
3684 print_ldst_list (FILE * file)
3685 {
3686 struct ls_expr * ptr;
3687
3688 fprintf (file, "LDST list: \n");
3689
3690 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
3691 {
3692 fprintf (file, " Pattern (%3d): ", ptr->index);
3693
3694 print_rtl (file, ptr->pattern);
3695
3696 fprintf (file, "\n Stores : ");
3697 print_rtx_insn_vec (file, ptr->stores);
3698
3699 fprintf (file, "\n\n");
3700 }
3701
3702 fprintf (file, "\n");
3703 }
3704
3705 /* Return the ldst entry for X if it exists and is valid, else NULL. */
3706
3707 static struct ls_expr *
3708 find_rtx_in_ldst (rtx x)
3709 {
3710 struct ls_expr e;
3711 ls_expr **slot;
3712 if (!pre_ldst_table)
3713 return NULL;
3714 e.pattern = x;
3715 slot = pre_ldst_table->find_slot (&e, NO_INSERT);
3716 if (!slot || (*slot)->invalid)
3717 return NULL;
3718 return *slot;
3719 }
3720 \f
3721 /* Load Motion for loads which only kill themselves. */
3722
3723 /* Return true if X, a MEM, is a simple access with no side effects.
3724 These are the types of loads we consider for the ld_motion list,
3725 otherwise we let the usual aliasing take care of it. */
3726
3727 static int
3728 simple_mem (const_rtx x)
3729 {
3730 if (MEM_VOLATILE_P (x))
3731 return 0;
3732
3733 if (GET_MODE (x) == BLKmode)
3734 return 0;
3735
3736 /* If we are handling exceptions, we must be careful with memory references
3737 that may trap. If we are not, the behavior is undefined, so we may just
3738 continue. */
3739 if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
3740 return 0;
3741
3742 if (side_effects_p (x))
3743 return 0;
3744
3745 /* Do not consider function arguments passed on the stack. */
3746 if (reg_mentioned_p (stack_pointer_rtx, x))
3747 return 0;
3748
3749 if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
3750 return 0;
3751
3752 return 1;
3753 }
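
/* Some illustrative cases (sketched RTL, not from any particular
   target):

       (mem:SI (symbol_ref ("x")))       accepted: a plain scalar access
       (mem/v:SI (symbol_ref ("x")))     rejected: volatile
       (mem:BLK (reg:SI 100))            rejected: BLKmode
       (mem:SI (plus:SI (reg:SI sp)
			(const_int 8)))  rejected: mentions the stack
					 pointer  */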
3754
3755 /* Make sure there isn't a buried reference in this pattern anywhere.
3756 If there is, invalidate the entry for it since we're not capable
3757 of fixing it up just yet. We have to be sure we know about ALL
3758 loads since the aliasing code will allow all entries in the
3759 ld_motion list to not alias one another. If we miss a load, we
3760 will get the wrong value since gcse might common it and we won't
3761 know to fix it up. */
3762
3763 static void
3764 invalidate_any_buried_refs (rtx x)
3765 {
3766 const char * fmt;
3767 int i, j;
3768 struct ls_expr * ptr;
3769
3770 /* Invalidate it in the list. */
3771 if (MEM_P (x) && simple_mem (x))
3772 {
3773 ptr = ldst_entry (x);
3774 ptr->invalid = 1;
3775 }
3776
3777 /* Recursively process the insn. */
3778 fmt = GET_RTX_FORMAT (GET_CODE (x));
3779
3780 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3781 {
3782 if (fmt[i] == 'e')
3783 invalidate_any_buried_refs (XEXP (x, i));
3784 else if (fmt[i] == 'E')
3785 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3786 invalidate_any_buried_refs (XVECEXP (x, i, j));
3787 }
3788 }
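
/* For example (illustrative), in the pattern

       (set (reg:SI 100) (plus:SI (mem:SI (symbol_ref ("a")))
				  (mem:SI (symbol_ref ("b")))))

   neither MEM is a bare top-level load, so when the walk above reaches
   them, each gets an ldst entry (created on demand by ldst_entry) that
   is immediately marked invalid.  */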
3789
3790 /* Find all the 'simple' MEMs which are used in LOADs and STORES. Simple
3791 means MEM loads and stores to symbols, with no side effects and no
3792 registers in the expression. For a MEM destination, we also check
3793 that the insn is still valid if we replace the destination with a
3794 REG, as is done in update_ld_motion_stores. If there are any
3795 uses/defs that do not match these criteria, they are invalidated and
3796 trimmed out later. */
3797
3798 static void
3799 compute_ld_motion_mems (void)
3800 {
3801 struct ls_expr * ptr;
3802 basic_block bb;
3803 rtx_insn *insn;
3804
3805 pre_ldst_mems = NULL;
3806 pre_ldst_table = new hash_table<pre_ldst_expr_hasher> (13);
3807
3808 FOR_EACH_BB_FN (bb, cfun)
3809 {
3810 FOR_BB_INSNS (bb, insn)
3811 {
3812 if (NONDEBUG_INSN_P (insn))
3813 {
3814 if (GET_CODE (PATTERN (insn)) == SET)
3815 {
3816 rtx src = SET_SRC (PATTERN (insn));
3817 rtx dest = SET_DEST (PATTERN (insn));
3818
3819 /* Check for a simple load. */
3820 if (MEM_P (src) && simple_mem (src))
3821 {
3822 ptr = ldst_entry (src);
3823 if (!REG_P (dest))
3824 ptr->invalid = 1;
3825 }
3826 else
3827 {
3828 /* Make sure there isn't a buried load somewhere. */
3829 invalidate_any_buried_refs (src);
3830 }
3831
3832 /* Check for a simple load through a REG_EQUAL note. */
3833 rtx note = find_reg_equal_equiv_note (insn), src_eq;
3834 if (note
3835 && REG_NOTE_KIND (note) == REG_EQUAL
3836 && (src_eq = XEXP (note, 0))
3837 && !(MEM_P (src_eq) && simple_mem (src_eq)))
3838 invalidate_any_buried_refs (src_eq);
3839
3840 /* Check for stores. Don't worry about aliased ones, they
3841 will block any movement we might do later. We only care
3842 about this exact pattern since those are the only
3843 circumstances in which we will ignore the aliasing info. */
3844 if (MEM_P (dest) && simple_mem (dest))
3845 {
3846 ptr = ldst_entry (dest);
3847 machine_mode src_mode = GET_MODE (src);
3848 if (! MEM_P (src)
3849 && GET_CODE (src) != ASM_OPERANDS
3850 /* Check for REG manually since want_to_gcse_p
3851 returns 0 for all REGs. */
3852 && can_assign_to_reg_without_clobbers_p (src,
3853 src_mode))
3854 ptr->stores.safe_push (insn);
3855 else
3856 ptr->invalid = 1;
3857 }
3858 }
3859 else
3860 {
3861 /* Invalidate all MEMs in the pattern and... */
3862 invalidate_any_buried_refs (PATTERN (insn));
3863
3864 /* ...in REG_EQUAL notes for PARALLELs with single SET. */
3865 rtx note = find_reg_equal_equiv_note (insn), src_eq;
3866 if (note
3867 && REG_NOTE_KIND (note) == REG_EQUAL
3868 && (src_eq = XEXP (note, 0)))
3869 invalidate_any_buried_refs (src_eq);
3870 }
3871 }
3872 }
3873 }
3874 }
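
/* As an illustration of the classification above (RTL sketched):

       (set (reg:SI 100) (mem:SI (symbol_ref ("x"))))
	   a simple load: gets a valid ldst entry;
       (set (mem:SI (symbol_ref ("x"))) (plus:SI (reg:SI 100)
						 (reg:SI 101)))
	   a store whose source could be assigned to a register:
	   recorded in the entry's stores vector;
       (set (mem:SI (symbol_ref ("x"))) (asm_operands ...))
	   an ASM source: the entry is invalidated.  */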
3875
3876 /* Remove any references that have been either invalidated or are not in the
3877 expression list for pre gcse. */
3878
3879 static void
3880 trim_ld_motion_mems (void)
3881 {
3882 struct ls_expr * * last = & pre_ldst_mems;
3883 struct ls_expr * ptr = pre_ldst_mems;
3884
3885 while (ptr != NULL)
3886 {
3887 struct gcse_expr * expr;
3888
3889 /* Delete if entry has been made invalid. */
3890 if (! ptr->invalid)
3891 {
3892 /* Delete if we cannot find this mem in the expression list. */
3893 unsigned int hash = ptr->hash_index % expr_hash_table.size;
3894
3895 for (expr = expr_hash_table.table[hash];
3896 expr != NULL;
3897 expr = expr->next_same_hash)
3898 if (expr_equiv_p (expr->expr, ptr->pattern))
3899 break;
3900 }
3901 else
3902 expr = (struct gcse_expr *) 0;
3903
3904 if (expr)
3905 {
3906 /* Set the expression field if we are keeping it. */
3907 ptr->expr = expr;
3908 last = & ptr->next;
3909 ptr = ptr->next;
3910 }
3911 else
3912 {
3913 *last = ptr->next;
3914 pre_ldst_table->remove_elt_with_hash (ptr, ptr->hash_index);
3915 free_ldst_entry (ptr);
3916 ptr = * last;
3917 }
3918 }
3919
3920 /* Show the world what we've found. */
3921 if (dump_file && pre_ldst_mems != NULL)
3922 print_ldst_list (dump_file);
3923 }
3924
3925 /* This routine takes an expression which we are replacing with
3926 a reaching register, and updates any stores that are needed if
3927 that expression is in the ld_motion list. Stores are updated by
3928 copying their SRC to the reaching register, and then storing
3929 the reaching register into the store location. This keeps the
3930 correct value in the reaching register for the loads. */
3931
3932 static void
3933 update_ld_motion_stores (struct gcse_expr * expr)
3934 {
3935 struct ls_expr * mem_ptr;
3936
3937 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
3938 {
3939 /* We could try to find just the REACHED stores, but it shouldn't
3940 hurt to set the reaching reg everywhere... some might be
3941 dead and should be eliminated later. */
3942
3943 /* We replace (set mem expr) with (set reg expr) (set mem reg)
3944 where reg is the reaching reg used in the load. We checked in
3945 compute_ld_motion_mems that we can replace (set mem expr) with
3946 (set reg expr) in that insn. */
3947 rtx_insn *insn;
3948 unsigned int i;
3949 FOR_EACH_VEC_ELT_REVERSE (mem_ptr->stores, i, insn)
3950 {
3951 rtx pat = PATTERN (insn);
3952 rtx src = SET_SRC (pat);
3953 rtx reg = expr->reaching_reg;
3954
3955 /* If we've already copied it, continue. */
3956 if (expr->reaching_reg == src)
3957 continue;
3958
3959 if (dump_file)
3960 {
3961 fprintf (dump_file, "PRE: store updated with reaching reg ");
3962 print_rtl (dump_file, reg);
3963 fprintf (dump_file, ":\n ");
3964 print_inline_rtx (dump_file, insn, 8);
3965 fprintf (dump_file, "\n");
3966 }
3967
3968 rtx_insn *copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
3969 emit_insn_before (copy, insn);
3970 SET_SRC (pat) = reg;
3971 df_insn_rescan (insn);
3972
3973 /* Un-recognize this pattern since it's probably different now. */
3974 INSN_CODE (insn) = -1;
3975 gcse_create_count++;
3976 }
3977 }
3978 }
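
/* Concretely (a sketch, with pseudo 200 standing in for the reaching
   register):

       before:  (set (mem:SI (symbol_ref ("i")))
		     (plus:SI (reg:SI 100) (reg:SI 101)))

       after:   (set (reg:SI 200) (plus:SI (reg:SI 100) (reg:SI 101)))
		(set (mem:SI (symbol_ref ("i"))) (reg:SI 200))

   so subsequent loads of 'i' can be satisfied from reg 200.  */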
3979 \f
3980 /* Return true if the graph is too expensive to optimize. PASS is the
3981 optimization about to be performed. */
3982
3983 bool
3984 gcse_or_cprop_is_too_expensive (const char *pass)
3985 {
3986 int memory_request = (n_basic_blocks_for_fn (cfun)
3987 * SBITMAP_SET_SIZE (max_reg_num ())
3988 * sizeof (SBITMAP_ELT_TYPE));
3989
3990 /* Trying to perform global optimizations on flow graphs which have
3991 a high connectivity will take a long time and is unlikely to be
3992 particularly useful.
3993
3994 In normal circumstances a cfg should have about twice as many
3995 edges as blocks. But we do not want to punish small functions
3996 which have a couple of switch statements. Rather than simply
3997 thresholding the number of blocks, use something with more
3998 graceful degradation. */
3999 if (n_edges_for_fn (cfun) > 20000 + n_basic_blocks_for_fn (cfun) * 4)
4000 {
4001 warning (OPT_Wdisabled_optimization,
4002 "%s: %d basic blocks and %d edges/basic block",
4003 pass, n_basic_blocks_for_fn (cfun),
4004 n_edges_for_fn (cfun) / n_basic_blocks_for_fn (cfun));
4005
4006 return true;
4007 }
4008
4009 /* If allocating memory for the dataflow bitmaps would take up too much
4010 storage it's better just to disable the optimization. */
4011 if (memory_request > param_max_gcse_memory)
4012 {
4013 warning (OPT_Wdisabled_optimization,
4014 "%s: %d basic blocks and %d registers; "
4015 "increase %<--param max-gcse-memory%> above %d",
4016 pass, n_basic_blocks_for_fn (cfun), max_reg_num (),
4017 memory_request);
4018
4019 return true;
4020 }
4021
4022 return false;
4023 }
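
/* A worked example of the thresholds above (numbers illustrative
   only): a function with 1000 basic blocks is rejected once it has
   more than 20000 + 1000 * 4 = 24000 edges, i.e. an average of 24
   edges per block, so ordinary cfgs with roughly two edges per block
   pass easily.  For the memory check, with 1000 blocks and 10000
   registers on a 64-bit host the request is about
   1000 * (10000 / 64) * 8 bytes, i.e. roughly 1.25 MB, which is then
   compared against --param max-gcse-memory.  */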
4024 \f
4025 static unsigned int
4026 execute_rtl_pre (void)
4027 {
4028 int changed;
4029 delete_unreachable_blocks ();
4030 df_analyze ();
4031 changed = one_pre_gcse_pass ();
4032 flag_rerun_cse_after_global_opts |= changed;
4033 if (changed)
4034 cleanup_cfg (0);
4035 return 0;
4036 }
4037
4038 static unsigned int
4039 execute_rtl_hoist (void)
4040 {
4041 int changed;
4042 delete_unreachable_blocks ();
4043 df_analyze ();
4044 changed = one_code_hoisting_pass ();
4045 flag_rerun_cse_after_global_opts |= changed;
4046 if (changed)
4047 cleanup_cfg (0);
4048 return 0;
4049 }
4050
4051 namespace {
4052
4053 const pass_data pass_data_rtl_pre =
4054 {
4055 RTL_PASS, /* type */
4056 "rtl pre", /* name */
4057 OPTGROUP_NONE, /* optinfo_flags */
4058 TV_PRE, /* tv_id */
4059 PROP_cfglayout, /* properties_required */
4060 0, /* properties_provided */
4061 0, /* properties_destroyed */
4062 0, /* todo_flags_start */
4063 TODO_df_finish, /* todo_flags_finish */
4064 };
4065
4066 class pass_rtl_pre : public rtl_opt_pass
4067 {
4068 public:
4069 pass_rtl_pre (gcc::context *ctxt)
4070 : rtl_opt_pass (pass_data_rtl_pre, ctxt)
4071 {}
4072
4073 /* opt_pass methods: */
4074 virtual bool gate (function *);
4075 virtual unsigned int execute (function *) { return execute_rtl_pre (); }
4076
4077 }; // class pass_rtl_pre
4078
4079 /* We do not construct an accurate cfg in functions which call
4080 setjmp, so none of these passes runs if the function calls
4081 setjmp.
4082 FIXME: Should just handle setjmp via REG_SETJMP notes. */
4083
4084 bool
4085 pass_rtl_pre::gate (function *fun)
4086 {
4087 return optimize > 0 && flag_gcse
4088 && !fun->calls_setjmp
4089 && optimize_function_for_speed_p (fun)
4090 && dbg_cnt (pre);
4091 }
4092
4093 } // anon namespace
4094
4095 rtl_opt_pass *
4096 make_pass_rtl_pre (gcc::context *ctxt)
4097 {
4098 return new pass_rtl_pre (ctxt);
4099 }
4100
4101 namespace {
4102
4103 const pass_data pass_data_rtl_hoist =
4104 {
4105 RTL_PASS, /* type */
4106 "hoist", /* name */
4107 OPTGROUP_NONE, /* optinfo_flags */
4108 TV_HOIST, /* tv_id */
4109 PROP_cfglayout, /* properties_required */
4110 0, /* properties_provided */
4111 0, /* properties_destroyed */
4112 0, /* todo_flags_start */
4113 TODO_df_finish, /* todo_flags_finish */
4114 };
4115
4116 class pass_rtl_hoist : public rtl_opt_pass
4117 {
4118 public:
4119 pass_rtl_hoist (gcc::context *ctxt)
4120 : rtl_opt_pass (pass_data_rtl_hoist, ctxt)
4121 {}
4122
4123 /* opt_pass methods: */
4124 virtual bool gate (function *);
4125 virtual unsigned int execute (function *) { return execute_rtl_hoist (); }
4126
4127 }; // class pass_rtl_hoist
4128
4129 bool
4130 pass_rtl_hoist::gate (function *)
4131 {
4132 return optimize > 0 && flag_gcse
4133 && !cfun->calls_setjmp
4134 /* It does not make sense to run code hoisting unless we are optimizing
4135 for code size -- it rarely makes programs faster, and can make them
4136 bigger if we did PRE (when optimizing for space, we don't run PRE). */
4137 && optimize_function_for_size_p (cfun)
4138 && dbg_cnt (hoist);
4139 }
4140
4141 } // anon namespace
4142
4143 rtl_opt_pass *
4144 make_pass_rtl_hoist (gcc::context *ctxt)
4145 {
4146 return new pass_rtl_hoist (ctxt);
4147 }
4148
4149 /* Reset all state within gcse.c so that we can rerun the compiler
4150 within the same process. For use by toplev::finalize. */
4151
4152 void
4153 gcse_c_finalize (void)
4154 {
4155 test_insn = NULL;
4156 }
4157
4158 #include "gt-gcse.h"