/* Partial redundancy elimination / Hoisting for RTL.
   Copyright (C) 1997-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - calc rough register pressure information and use the info to drive all
     kinds of code motion (including code hoisting) in a unified way.
*/

/* References searched while implementing this.

   Compilers Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The power of assignment motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global code motion / global value numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "print-rtl.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgcleanup.h"
#include "expr.h"
#include "params.h"
#include "intl.h"
#include "tree-pass.h"
#include "dbgcnt.h"
#include "gcse.h"
#include "gcse-common.h"

/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by classic GCSE.

   Two passes of copy/constant propagation are done around PRE or hoisting
   because the first one enables more GCSE and the second one helps to clean
   up the copies that PRE and HOIST create.  This is needed more for PRE than
   for HOIST because code hoisting will try to use an existing register
   containing the common subexpression rather than create a new one.  This is
   harder to do for PRE because of the code motion (which HOIST doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found that doing copy propagation between each pass enables further
   substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Multiple passes are probably even less profitable now than
   at the time when the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CPROP pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much, because it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
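
/* Illustration only, not part of the pass: a hypothetical source-level
   sketch of the PRE steps above.  The pass itself works on RTL
   expressions of the form (set (pseudo-reg) (expr)), not on C; the
   function names and the choice of `a + b' here are made up.  */
#if 0
int
before_pre (int a, int b, int p)
{
  int x = 0, y;
  if (p)
    x = a + b;		/* `a + b' is computed on this path only...  */
  y = a + b;		/* ...so this computation is partially redundant.  */
  return x + y;
}

int
after_pre (int a, int b, int p)
{
  int t, x = 0, y;
  if (p)
    x = t = a + b;	/* The original computation's value is copied to a
			   new pseudo T (step 5).  */
  else
    t = a + b;		/* Insertion on the other path (step 4) makes Y's
			   computation fully redundant...  */
  y = t;		/* ...so it is deleted and replaced by a copy from
			   T (step 3).  */
  return x + y;
}
#endif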
\f
/* GCSE global vars.  */

struct target_gcse default_target_gcse;
#if SWITCHABLE_TARGET
struct target_gcse *this_target_gcse = &default_target_gcse;
#endif

/* Set to non-zero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

/* Hash table of expressions.  */

struct gcse_expr
{
  /* The expression.  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct gcse_expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct gcse_occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct gcse_occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
  /* Maximum distance in instructions this expression can travel.
     We avoid moving simple expressions for more than a few instructions
     to keep register pressure under control.
     A value of "0" removes restrictions on how far the expression can
     travel.  */
  int max_distance;
};

/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct gcse_occr
{
  /* Next occurrence of this expression.  */
  struct gcse_occr *next;
  /* The insn that computes the expression.  */
  rtx_insn *insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};
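
/* Illustration only -- which occurrence of `a + b' lands on each list
   for a hypothetical straight-line block; `use' is a made-up extern.  */
#if 0
extern void use (int);

void
occr_example (int a, int b)
{
  int x = a + b;	/* First occurrence in the block, operands not
			   modified before it: recorded on ANTIC_OCCR.  */
  use (x);
  int y = a + b;	/* Last occurrence, operands not modified from here
			   to the end of the block: recorded on AVAIL_OCCR.  */
  use (y);
}
#endif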

typedef struct gcse_occr *occr_t;

/* Expression hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct gcse_hash_table_d
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct gcse_expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;
};
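
/* Illustration only -- a minimal sketch of a lookup in the bucket/chain
   structure above, mirroring the loop at the top of insert_expr_in_table
   further down; `lookup_expr_sketch' is a made-up name.  */
#if 0
static struct gcse_expr *
lookup_expr_sketch (rtx x, machine_mode mode, struct gcse_hash_table_d *table)
{
  int do_not_record_p = 0;
  unsigned int hash = hash_expr (x, mode, &do_not_record_p, table->size);
  struct gcse_expr *cur;

  if (do_not_record_p)
    return NULL;	/* Volatile or otherwise unhandled expression.  */

  /* Walk the chain of entries that hashed to the same bucket.  */
  for (cur = table->table[hash]; cur != NULL; cur = cur->next_same_hash)
    if (expr_equiv_p (cur->expr, x))
      return cur;

  return NULL;
}
#endif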

/* Expression hash table.  */
static struct gcse_hash_table_d expr_hash_table;

/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by anything except itself,
   i.e. loads and stores to a single location.
   We can then allow movement of these MEM refs with a little special
   allowance.  (All stores copy the same value to the reaching reg used
   for the loads.)  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.  */

struct ls_expr
{
  struct gcse_expr * expr;	/* Gcse expression reference for LM.  */
  rtx pattern;			/* Pattern of this mem.  */
  rtx pattern_regs;		/* List of registers mentioned by the mem.  */
  vec<rtx_insn *> stores;	/* INSN list of stores seen.  */
  struct ls_expr * next;	/* Next in the list.  */
  int invalid;			/* Invalid for some reason.  */
  int index;			/* If it maps to a bitmap index.  */
  unsigned int hash_index;	/* Index when in a hash table.  */
  rtx reaching_reg;		/* Register to use when re-writing.  */
};

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;

struct pre_ldst_expr_hasher : nofree_ptr_hash <ls_expr>
{
  typedef value_type compare_type;
  static inline hashval_t hash (const ls_expr *);
  static inline bool equal (const ls_expr *, const ls_expr *);
};

/* Hashtable helpers.  */
inline hashval_t
pre_ldst_expr_hasher::hash (const ls_expr *x)
{
  int do_not_record_p = 0;
  return
    hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
}

static int expr_equiv_p (const_rtx, const_rtx);

inline bool
pre_ldst_expr_hasher::equal (const ls_expr *ptr1,
			     const ls_expr *ptr2)
{
  return expr_equiv_p (ptr1->pattern, ptr2->pattern);
}

/* Hashtable for the load/store memory refs.  */
static hash_table<pre_ldst_expr_hasher> *pre_ldst_table;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number for a list of insns which modify
   memory within that block.  */
static vec<rtx_insn *> *modify_mem_list;
static bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, except that it stores MEMs
   being set and their canonicalized memory addresses.  */
static vec<modify_pair> *canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;

/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;
\f
/* Doing code hoisting.  */
static bool doing_code_hoisting_p = false;
\f
/* For available exprs.  */
static sbitmap *ae_kill;
\f
/* Data stored for each basic block.  */
struct bb_data
{
  /* Maximal register pressure inside basic block for given register class
     (defined only for the pressure classes).  */
  int max_reg_pressure[N_REG_CLASSES];
  /* Recorded register pressure of basic block before trying to hoist
     an expression.  Will be used to restore the register pressure
     if the expression should not be hoisted.  */
  int old_pressure;
  /* Recorded register live_in info of basic block during code hoisting
     process.  BACKUP is used to record live_in info before trying to
     hoist an expression, and will be used to restore LIVE_IN if the
     expression should not be hoisted.  */
  bitmap live_in, backup;
};

#define BB_DATA(bb) ((struct bb_data *) (bb)->aux)

static basic_block curr_bb;

/* Current register pressure for each pressure class.  */
static int curr_reg_pressure[N_REG_CLASSES];
\f

static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_set (rtx, rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_clobber (rtx, rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_call (rtx, rtx_insn *, struct gcse_hash_table_d *);
static int oprs_unchanged_p (const_rtx, const rtx_insn *, int);
static int oprs_anticipatable_p (const_rtx, const rtx_insn *);
static int oprs_available_p (const_rtx, const rtx_insn *);
static void insert_expr_in_table (rtx, machine_mode, rtx_insn *, int, int,
				  int, struct gcse_hash_table_d *);
static unsigned int hash_expr (const_rtx, machine_mode, int *, int);
static void record_last_reg_set_info (rtx_insn *, int);
static void record_last_mem_set_info (rtx_insn *);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct gcse_hash_table_d *);
static void alloc_hash_table (struct gcse_hash_table_d *);
static void free_hash_table (struct gcse_hash_table_d *);
static void compute_hash_table_work (struct gcse_hash_table_d *);
static void dump_hash_table (FILE *, const char *, struct gcse_hash_table_d *);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
				      struct gcse_hash_table_d *);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static struct edge_list *compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct gcse_expr *,
				    basic_block);
static void insert_insn_end_basic_block (struct gcse_expr *, basic_block);
static void pre_insert_copy_insn (struct gcse_expr *, rtx_insn *);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (struct edge_list *);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx_insn *);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int should_hoist_expr_to_dom (basic_block, struct gcse_expr *,
				     basic_block, sbitmap, int, int *,
				     enum reg_class, int *, bitmap,
				     rtx_insn *);
static int hoist_code (void);
static enum reg_class get_regno_pressure_class (int regno, int *nregs);
static enum reg_class get_pressure_class_and_nregs (rtx_insn *insn, int *nregs);
static int one_code_hoisting_pass (void);
static rtx_insn *process_insert_insn (struct gcse_expr *);
static int pre_edge_insert (struct edge_list *, struct gcse_expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct gcse_expr *,
					 basic_block, char *);
static struct ls_expr * ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ld_motion_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr * find_rtx_in_ldst (rtx);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct gcse_expr *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);

#define GNEW(T)			((T *) gmalloc (sizeof (T)))
#define GCNEW(T)		((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N)		((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N)		((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S)		((T *) gmalloc ((S)))
#define GCNEWVAR(T, S)		((T *) gcalloc (1, (S)))

#define GOBNEW(T)		((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S)		((T *) gcse_alloc ((S)))
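
/* Illustration only -- hypothetical usage showing how the macros above
   pair with their lifetimes: GNEW-family memory comes from xmalloc or
   xcalloc and must be freed explicitly, while GOBNEW-family memory
   lives on gcse_obstack and is released wholesale.  */
#if 0
static void
alloc_macros_sketch (void)
{
  /* Heap-allocated, freed with free (); see compute_hash_table_work.  */
  struct reg_avail_info *ravail
    = GNEWVEC (struct reg_avail_info, max_reg_num ());
  free (ravail);

  /* Obstack-allocated; freed when gcse_obstack is released.  See
     insert_expr_in_table.  */
  struct gcse_expr *e = GOBNEW (struct gcse_expr);
  (void) e;
}
#endif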
\f
/* Misc. utilities.  */

#define can_copy \
  (this_target_gcse->x_can_copy)
#define can_copy_init_p \
  (this_target_gcse->x_can_copy_init_p)

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg;
  rtx_insn *insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
	can_copy[i] = 0;
#else
	reg = gen_rtx_REG ((machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
	insn = emit_insn (gen_rtx_SET (reg, reg));
	if (recog (PATTERN (insn), insn, NULL) >= 0)
	  can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}

/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (machine_mode mode)
{
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}
\f
/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}

/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = ALLOC_REG_SET (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  The two typedefs are needed to work around the
     pre-processor limitation with template types in macro arguments.  */
  typedef vec<rtx_insn *> vec_rtx_heap;
  typedef vec<modify_pair> vec_modify_pair_heap;
  modify_mem_list = GCNEWVEC (vec_rtx_heap, last_basic_block_for_fn (cfun));
  canon_modify_mem_list = GCNEWVEC (vec_modify_pair_heap,
				    last_basic_block_for_fn (cfun));
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  FREE_REG_SET (reg_set_bitmap);

  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}
\f
/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and the expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and the expression would contain the same value if the
   computation was moved to the beginning of the block.

   We call this routine for PRE and code hoisting; both compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  */

static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
			  struct gcse_hash_table_d *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
    }

  if (comp)
    bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));
  if (antloc)
    bitmap_vector_clear (antloc, last_basic_block_for_fn (cfun));

  for (i = 0; i < table->size; i++)
    {
      struct gcse_expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
	{
	  int indx = expr->bitmap_index;
	  struct gcse_occr *occr;

	  /* The expression is transparent in this block if it is not killed.
	     We start by assuming all are transparent [none are killed], and
	     then reset the bits for those that are.  */
	  if (transp)
	    compute_transp (expr->expr, indx, transp,
			    blocks_with_calls,
			    modify_mem_list_set,
			    canon_modify_mem_list);

	  /* The occurrences recorded in antic_occr are exactly those that
	     we want to set to nonzero in ANTLOC.  */
	  if (antloc)
	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	      {
		bitmap_set_bit (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->deleted_p = 0;
	      }

	  /* The occurrences recorded in avail_occr are exactly those that
	     we want to set to nonzero in COMP.  */
	  if (comp)
	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
	      {
		bitmap_set_bit (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->copied_p = 0;
	      }

	  /* While we're scanning the table, this is a good place to
	     initialize this.  */
	  expr->reaching_reg = 0;
	}
    }
}
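
/* Illustration only -- the three local property bits for a hypothetical
   block BB and candidate expression E = `a + b' with bitmap index I;
   `use' is a made-up extern.  */
#if 0
extern void use (int);

void
local_props_example (int a, int b)
{
  int x = a + b;	/* ANTLOC[BB][I] = 1: E is computed with its
			   operands unmodified since the block start.  */
  a = a + 1;		/* An operand of E is modified in BB, so
			   TRANSP[BB][I] = 0; and because the operand
			   changes after the occurrence, COMP[BB][I] = 0
			   as well (E's value does not survive to the
			   block end).  */
  use (x);
}
#endif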
\f
/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;

/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x, machine_mode mode, int *max_distance_ptr)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  /* GCSE'ing constants:

     We do not specifically distinguish between constant and non-constant
     expressions in PRE and Hoist.  We use set_src_cost below to limit
     the maximum distance simple expressions can travel.

     Nevertheless, constants are much easier to GCSE, and, hence,
     it is easy to overdo the optimizations.  Usually, excessive PRE and
     Hoisting of constants leads to increased register pressure.

     RA can deal with this by rematerializing some of the constants.
     Therefore, it is important that the back-end generates sets of constants
     in a way that allows reload to rematerialize them under high register
     pressure, i.e., a pseudo register with a REG_EQUAL note to a constant
     is set only once.  Failing to do so will result in IRA/reload
     spilling such constants under high register pressure instead of
     rematerializing them.  */

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CALL:
      return 0;

    CASE_CONST_ANY:
      if (!doing_code_hoisting_p)
	/* Do not PRE constants.  */
	return 0;

      /* FALLTHRU */

    default:
      if (doing_code_hoisting_p)
	/* PRE doesn't implement max_distance restriction.  */
	{
	  int cost;
	  int max_distance;

	  gcc_assert (!optimize_function_for_speed_p (cfun)
		      && optimize_function_for_size_p (cfun));
	  cost = set_src_cost (x, mode, 0);

	  if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
	    {
	      max_distance = (GCSE_COST_DISTANCE_RATIO * cost) / 10;
	      if (max_distance == 0)
		return 0;

	      gcc_assert (max_distance > 0);
	    }
	  else
	    max_distance = 0;

	  if (max_distance_ptr)
	    *max_distance_ptr = max_distance;
	}

      return can_assign_to_reg_without_clobbers_p (x, mode);
    }
}

/* Used internally by can_assign_to_reg_without_clobbers_p.  */

static GTY(()) rtx_insn *test_insn;

/* Return true if we can assign X to a pseudo register of mode MODE
   such that the resulting insn does not result in clobbering a hard
   register as a side-effect.

   Additionally, if the target requires it, check that the resulting insn
   can be copied.  If it cannot, this means that X is special and probably
   has hidden side-effects we don't want to mess with.

   This function is typically used by code motion passes, to verify
   that it is safe to insert an insn without worrying about clobbering
   maybe live hard regs.  */

bool
can_assign_to_reg_without_clobbers_p (rtx x, machine_mode mode)
{
  int num_clobbers = 0;
  int icode;
  bool can_assign = false;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, mode))
    return 1;
  else if (GET_MODE (x) == VOIDmode)
    return 0;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
	= make_insn_raw (gen_rtx_SET (gen_rtx_REG (word_mode,
						   FIRST_PSEUDO_REGISTER * 2),
				      const0_rtx));
      SET_NEXT_INSN (test_insn) = SET_PREV_INSN (test_insn) = 0;
      INSN_LOCATION (test_insn) = UNKNOWN_LOCATION;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), mode);
  SET_SRC (PATTERN (test_insn)) = x;

  icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);

  /* If the test insn is valid and doesn't need clobbers, and the target also
     has no objections, we're good.  */
  if (icode >= 0
      && (num_clobbers == 0 || !added_clobbers_hard_reg_p (icode))
      && ! (targetm.cannot_copy_insn_p
	    && targetm.cannot_copy_insn_p (test_insn)))
    can_assign = true;

  /* Make sure test_insn doesn't have any pointers into GC space.  */
  SET_SRC (PATTERN (test_insn)) = NULL_RTX;

  return can_assign;
}

/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (const_rtx x, const rtx_insn *insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

	if (info->last_bb != current_bb)
	  return 1;
	if (avail_p)
	  return info->last_set < DF_INSN_LUID (insn);
	else
	  return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (! flag_gcse_lm
	  || load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
				     x, avail_p))
	return 0;
      else
	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0: /*FIXME*/
    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call needed at this
	     level, change it into iteration.  This function is called enough
	     to be worth it.  */
	  if (i == 0)
	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
	    return 0;
    }

  return 1;
}
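
/* Illustration only -- the generic walk over an rtx's format string used
   above, reduced to a standalone counting sketch; `count_subrtxs_sketch'
   is a made-up name.  'e' marks a sub-rtx operand, 'E' a vector of
   sub-rtxs.  */
#if 0
static int
count_subrtxs_sketch (const_rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j, n = 1;

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      n += count_subrtxs_sketch (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	n += count_subrtxs_sketch (XVECEXP (x, i, j));

  return n;
}
#endif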

/* Info passed from load_killed_in_block_p to mems_conflict_for_gcse_p.  */

struct mem_conflict_info
{
  /* A memory reference for a load instruction, mems_conflict_for_gcse_p will
     see if a memory store conflicts with this memory load.  */
  const_rtx mem;

  /* True if mems_conflict_for_gcse_p finds a conflict between two memory
     references.  */
  bool conflict;
};

/* DEST is the output of an instruction.  If it is a memory reference and
   possibly conflicts with the load found in DATA, then communicate this
   information back through DATA.  */

static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			  void *data)
{
  struct mem_conflict_info *mci = (struct mem_conflict_info *) data;

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark it as killed this time.  */
  if (pre_ldst_mems != NULL && expr_equiv_p (dest, mci->mem))
    {
      if (!find_rtx_in_ldst (dest))
	mci->conflict = true;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), mci->mem))
    mci->conflict = true;
}

/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
			int avail_p)
{
  vec<rtx_insn *> list = modify_mem_list[bb->index];
  rtx_insn *setter;
  unsigned ix;

  /* If this is a readonly MEM, then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  FOR_EACH_VEC_ELT_REVERSE (list, ix, setter)
    {
      struct mem_conflict_info mci;

      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
	   && DF_INSN_LUID (setter) < uid_limit)
	  || (! avail_p
	      && DF_INSN_LUID (setter) > uid_limit))
	continue;

      /* If SETTER is a call everything is clobbered.  Note that calls
	 to pure functions are never put on the list, so we need not
	 worry about them.  */
      if (CALL_P (setter))
	return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
	 note_stores to examine each hunk of memory that is modified.  */
      mci.mem = x;
      mci.conflict = false;
      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, &mci);
      if (mci.conflict)
	return 1;
    }
  return 0;
}
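
/* Illustration only -- the two query directions supported above,
   matching the calls made from oprs_unchanged_p; `load_killed_sketch'
   is a made-up name.  */
#if 0
static int
load_killed_sketch (const_basic_block bb, const rtx_insn *load, const_rtx mem,
		    int avail_p)
{
  /* AVAIL_P == 0: is MEM killed by a store *before* LOAD in BB
     (anticipatability)?  AVAIL_P != 0: is MEM killed by a store *after*
     LOAD (availability)?  */
  return load_killed_in_block_p (bb, DF_INSN_LUID (load), mem, avail_p);
}
#endif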

/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const rtx_insn *insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const rtx_insn *insn)
{
  return oprs_unchanged_p (x, insn, 1);
}

/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, machine_mode mode, int *do_not_record_p,
	   int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p, NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Return nonzero if exp1 is equivalent to exp2.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}

/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.

   MAX_DISTANCE is the maximum distance in instructions this expression can
   be moved.  */

static void
insert_expr_in_table (rtx x, machine_mode mode, rtx_insn *insn, int antic_p,
		      int avail_p, int max_distance,
		      struct gcse_hash_table_d *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct gcse_expr *cur_expr, *last_expr = NULL;
  struct gcse_occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct gcse_expr);
      bytes_used += sizeof (struct gcse_expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      gcc_assert (max_distance >= 0);
      cur_expr->max_distance = max_distance;
    }
  else
    gcc_assert (cur_expr->max_distance == max_distance);

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr
	  && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
	antic_occr = NULL;

      if (antic_occr)
	/* Found another instance of the expression in the same basic block.
	   Prefer the currently recorded one.  We want the first one in the
	   block and the block is scanned from start to end.  */
	; /* nothing to do */
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  antic_occr = GOBNEW (struct gcse_occr);
	  bytes_used += sizeof (struct gcse_occr);
	  antic_occr->insn = insn;
	  antic_occr->next = cur_expr->antic_occr;
	  antic_occr->deleted_p = 0;
	  cur_expr->antic_occr = antic_occr;
	}
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr
	  && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
	{
	  /* Found another instance of the expression in the same basic block.
	     Prefer this occurrence to the currently recorded one.  We want
	     the last one in the block and the block is scanned from start
	     to end.  */
	  avail_occr->insn = insn;
	}
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  avail_occr = GOBNEW (struct gcse_occr);
	  bytes_used += sizeof (struct gcse_occr);
	  avail_occr->insn = insn;
	  avail_occr->next = cur_expr->avail_occr;
	  avail_occr->deleted_p = 0;
	  cur_expr->avail_occr = avail_occr;
	}
    }
}

/* Scan SET present in INSN and add an entry to the hash TABLE.  */

static void
hash_scan_set (rtx set, rtx_insn *insn, struct gcse_hash_table_d *table)
{
  rtx src = SET_SRC (set);
  rtx dest = SET_DEST (set);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      int max_distance = 0;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

	 This allows us to do a single GCSE pass and still eliminate
	 redundant constants, addresses or other expressions that are
	 constructed with multiple instructions.

	 However, keep the original SRC if INSN is a simple reg-reg move.
	 In this case, there will almost always be a REG_EQUAL note on the
	 insn that sets SRC.  By recording the REG_EQUAL value here as SRC
	 for INSN, we miss copy propagation opportunities and we perform the
	 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
	 do more than one PRE GCSE pass.

	 Note that this does not impede profitable constant propagations.  We
	 "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
	  && REG_NOTE_KIND (note) == REG_EQUAL
	  && !REG_P (src)
	  && want_to_gcse_p (XEXP (note, 0), GET_MODE (dest), NULL))
	src = XEXP (note, 0), set = gen_rtx_SET (dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (dest))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  /* ??? We can now easily create new EH landing pads at the
	     gimple level, for splitting edges; there's no reason we
	     can't do the same thing at the rtl level.  */
	  && !can_throw_internal (insn)
	  /* Is SET_SRC something we want to gcse?  */
	  && want_to_gcse_p (src, GET_MODE (dest), &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has an attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes, and if the argument slot is used somewhere
	     explicitly, it means the address of the parameter has been
	     taken, so we should not extend the lifetime of the pseudo.  */
	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
	{
	  /* An expression is not anticipatable if its operands are
	     modified before this insn or if this is not the only SET in
	     this insn.  The latter condition does not have to mean that
	     SRC itself is not anticipatable, but we just will not be
	     able to handle code motion of insns with multiple sets.  */
	  int antic_p = oprs_anticipatable_p (src, insn)
			&& !multiple_sets (insn);
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = (oprs_available_p (src, insn)
			 && ! JUMP_P (insn));

	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
				max_distance, table);
	}
    }
  /* In case of store we want to consider the memory value as available in
     the REG stored in that memory.  This makes it possible to remove
     redundant loads due to stores to the same location.  */
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);
      int max_distance = 0;

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (src))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  && !can_throw_internal (insn)
	  /* Is SET_DEST something we want to gcse?  */
	  && want_to_gcse_p (dest, GET_MODE (dest), &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has an attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes, and if the argument slot is used somewhere
	     explicitly, it means the address of the parameter has been
	     taken, so we should not extend the lifetime of the pseudo.  */
	  && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
	      || ! MEM_P (XEXP (note, 0))))
	{
	  /* Stores are never anticipatable.  */
	  int antic_p = 0;
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = oprs_available_p (dest, insn) && ! JUMP_P (insn);

	  /* Record the memory expression (DEST) in the hash table.  */
	  insert_expr_in_table (dest, GET_MODE (dest), insn,
				antic_p, avail_p, max_distance, table);
	}
    }
}
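
/* Illustration only -- why the REG_EQUAL substitution above matters.
   A hypothetical high-part/low-part address computation:

     (set (reg:SI 100) (high:SI (symbol_ref "x")))
     (set (reg:SI 101) (lo_sum:SI (reg:SI 100) (symbol_ref "x")))
	  ;; REG_EQUAL note: (symbol_ref "x")

   Recording the note value (symbol_ref "x") as SRC for the second insn
   lets PRE treat the whole two-insn address computation as a single
   candidate expression, instead of seeing only the lo_sum.  */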

static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
		   struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
		struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

/* Process INSN and add hash table entries as appropriate.  */

static void
hash_scan_insn (rtx_insn *insn, struct gcse_hash_table_d *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);

  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);

  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
	rtx x = XVECEXP (pat, 0, i);

	if (GET_CODE (x) == SET)
	  hash_scan_set (x, insn, table);
	else if (GET_CODE (x) == CLOBBER)
	  hash_scan_clobber (x, insn, table);
	else if (GET_CODE (x) == CALL)
	  hash_scan_call (x, insn, table);
      }
}

/* Dump the hash table TABLE to file FILE under the name NAME.  */

static void
dump_hash_table (FILE *file, const char *name, struct gcse_hash_table_d *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct gcse_expr **flat_table;
  unsigned int *hash_val;
  struct gcse_expr *expr;

  flat_table = XCNEWVEC (struct gcse_expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
	flat_table[expr->bitmap_index] = expr;
	hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
	   name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
	expr = flat_table[i];
	fprintf (file, "Index %d (hash value %d; max distance %d)\n  ",
		 expr->bitmap_index, hash_val[i], expr->max_distance);
	print_rtl (file, expr->expr);
	fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}

/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */

static void
record_last_reg_set_info (rtx_insn *insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}
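
/* Illustration only -- how the recorded LUIDs answer the queries in
   oprs_unchanged_p, for a hypothetical block where register R is set at
   LUIDs 4 and 9 (so first_set == 4, last_set == 9):

     insn at LUID 2:  first_set >= 2, so R is unchanged from the block
		      start up to the insn (anticipatability holds).
     insn at LUID 6:  first_set < 6 (changed before it) and
		      last_set >= 6 (changed at or after it).
     insn at LUID 11: last_set < 11, so R is unchanged from the insn to
		      the block end (availability holds).  */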

/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set
   (consider a CALL_INSN).  We merely need to record which insns modify
   memory.  */

static void
record_last_mem_set_info (rtx_insn *insn)
{
  if (! flag_gcse_lm)
    return;

  record_last_mem_set_info_common (insn, modify_mem_list,
				   canon_modify_mem_list,
				   modify_mem_list_set,
				   blocks_with_calls);
}

/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx_insn *last_set_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}

/* Top level function to create an expression hash table.

   Expression entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform GCSE on,
   - none of the operands are subsequently modified in the block.

   Currently src must be a pseudo-reg or a const_int.

   TABLE is the table computed.  */

static void
compute_hash_table_work (struct gcse_hash_table_d *table)
{
  int i;

  /* Re-cache any INSN_LIST nodes we have allocated.  */
  clear_modify_mem_tables ();
  /* Some working arrays used to track first and last set in each block.  */
  reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());

  for (i = 0; i < max_reg_num (); ++i)
    reg_avail_info[i].last_bb = NULL;

  FOR_EACH_BB_FN (current_bb, cfun)
    {
      rtx_insn *insn;
      unsigned int regno;

      /* First pass over the instructions records information used to
	 determine when registers and memory are first and last set.  */
      FOR_BB_INSNS (current_bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  if (CALL_P (insn))
	    {
	      hard_reg_set_iterator hrsi;
	      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call,
					      0, regno, hrsi)
		record_last_reg_set_info (insn, regno);

	      if (! RTL_CONST_OR_PURE_CALL_P (insn))
		record_last_mem_set_info (insn);
	    }

	  note_stores (PATTERN (insn), record_last_set_info, insn);
	}

      /* The next pass builds the hash table.  */
      FOR_BB_INSNS (current_bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  hash_scan_insn (insn, table);
    }

  free (reg_avail_info);
  reg_avail_info = NULL;
}

/* Allocate space for the set/expr hash TABLE.
   The size is based on the number of insns in the function, which is
   used to determine the number of buckets.  */

static void
alloc_hash_table (struct gcse_hash_table_d *table)
{
  int n;

  n = get_max_insn_count ();

  table->size = n / 4;
  if (table->size < 11)
    table->size = 11;

  /* Attempt to maintain efficient use of hash table.
     Making it an odd number is simplest for now.
     ??? Later take some measurements.  */
  table->size |= 1;
  n = table->size * sizeof (struct gcse_expr *);
  table->table = GNEWVAR (struct gcse_expr *, n);
}
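
/* Illustration only -- the resulting bucket counts for some hypothetical
   insn counts: n = 40 gives 40/4 = 10, raised to the floor of 11;
   n = 1000 gives 250 | 1 = 251; n = 4000 gives 1000 | 1 = 1001.  */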

/* Free things allocated by alloc_hash_table.  */

static void
free_hash_table (struct gcse_hash_table_d *table)
{
  free (table->table);
}

/* Compute the expression hash table TABLE.  */

static void
compute_hash_table (struct gcse_hash_table_d *table)
{
  /* Initialize count of number of entries in hash table.  */
  table->n_elems = 0;
  memset (table->table, 0, table->size * sizeof (struct gcse_expr *));

  compute_hash_table_work (table);
}
\f
/* Expression tracking support.  */

/* Clear canon_modify_mem_list and modify_mem_list tables.  */
static void
clear_modify_mem_tables (void)
{
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
    {
      modify_mem_list[i].release ();
      canon_modify_mem_list[i].release ();
    }
  bitmap_clear (modify_mem_list_set);
  bitmap_clear (blocks_with_calls);
}

/* Release memory used by modify_mem_list_set.  */

static void
free_modify_mem_tables (void)
{
  clear_modify_mem_tables ();
  free (modify_mem_list);
  free (canon_modify_mem_list);
  modify_mem_list = 0;
  canon_modify_mem_list = 0;
}
\f
/* Compute PRE+LCM working variables.  */

/* Local properties of expressions.  */

/* Nonzero for expressions that are transparent in the block.  */
static sbitmap *transp;

/* Nonzero for expressions that are computed (available) in the block.  */
static sbitmap *comp;

/* Nonzero for expressions that are locally anticipatable in the block.  */
static sbitmap *antloc;

/* Nonzero for expressions where this block is an optimal computation
   point.  */
static sbitmap *pre_optimal;

/* Nonzero for expressions which are redundant in a particular block.  */
static sbitmap *pre_redundant;

/* Nonzero for expressions which should be inserted on a specific edge.  */
static sbitmap *pre_insert_map;

/* Nonzero for expressions which should be deleted in a specific block.  */
static sbitmap *pre_delete_map;

/* Allocate vars used for PRE analysis.  */

static void
alloc_pre_mem (int n_blocks, int n_exprs)
{
  transp = sbitmap_vector_alloc (n_blocks, n_exprs);
  comp = sbitmap_vector_alloc (n_blocks, n_exprs);
  antloc = sbitmap_vector_alloc (n_blocks, n_exprs);

  pre_optimal = NULL;
  pre_redundant = NULL;
  pre_insert_map = NULL;
  pre_delete_map = NULL;
  ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);

  /* pre_insert and pre_delete are allocated later.  */
}

/* Free vars used for PRE analysis.  */

static void
free_pre_mem (void)
{
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);

  /* ANTLOC and AE_KILL are freed just after pre_lcm finishes.  */

  if (pre_optimal)
    sbitmap_vector_free (pre_optimal);
  if (pre_redundant)
    sbitmap_vector_free (pre_redundant);
  if (pre_insert_map)
    sbitmap_vector_free (pre_insert_map);
  if (pre_delete_map)
    sbitmap_vector_free (pre_delete_map);

  transp = comp = NULL;
  pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
}
1685
1686 /* Remove certain expressions from anticipatable and transparent
1687 sets of basic blocks that have an incoming abnormal edge.
1688 For PRE, remove potentially trapping expressions to avoid placing
1689 them on abnormal edges.  For hoisting, remove memory references
1690 that can be clobbered by calls. */
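
/* As an illustrative sketch (hypothetical, not from a testcase): if
   block B has an incoming abnormal edge and contains the potentially
   trapping expression a / b, leaving the division anticipatable at
   B's entry could make LCM request an insertion on the abnormal edge
   itself, which we cannot split.  Clearing the expression from
   ANTLOC and TRANSP of B prevents that.  */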
1691
1692 static void
1693 prune_expressions (bool pre_p)
1694 {
1695 struct gcse_expr *expr;
1696 unsigned int ui;
1697 basic_block bb;
1698
1699 auto_sbitmap prune_exprs (expr_hash_table.n_elems);
1700 bitmap_clear (prune_exprs);
1701 for (ui = 0; ui < expr_hash_table.size; ui++)
1702 {
1703 for (expr = expr_hash_table.table[ui]; expr; expr = expr->next_same_hash)
1704 {
1705 /* Note potentially trapping expressions. */
1706 if (may_trap_p (expr->expr))
1707 {
1708 bitmap_set_bit (prune_exprs, expr->bitmap_index);
1709 continue;
1710 }
1711
1712 if (!pre_p && contains_mem_rtx_p (expr->expr))
1713 /* Note memory references that can be clobbered by a call.
1714 We do not split abnormal edges in hoisting, so if a
1715 memory reference were hoisted along an abnormal edge,
1716 it would be placed /before/ the call.  Therefore, only
1717 constant memory references can be hoisted along abnormal
1718 edges. */
1719 {
1720 rtx x = expr->expr;
1721
1722 /* Common cases where we might find the MEM which may allow us
1723 to avoid pruning the expression. */
1724 while (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
1725 x = XEXP (x, 0);
1726
1727 /* If we found the MEM, go ahead and look at it to see if it has
1728 properties that allow us to avoid pruning its expression out
1729 of the tables. */
1730 if (MEM_P (x))
1731 {
1732 if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
1733 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
1734 continue;
1735
1736 if (MEM_READONLY_P (x)
1737 && !MEM_VOLATILE_P (x)
1738 && MEM_NOTRAP_P (x))
1739 /* Constant memory reference, e.g., a PIC address. */
1740 continue;
1741 }
1742
1743 /* ??? Optimally, we would use interprocedural alias
1744 analysis to determine if this mem is actually killed
1745 by this call. */
1746
1747 bitmap_set_bit (prune_exprs, expr->bitmap_index);
1748 }
1749 }
1750 }
1751
1752 FOR_EACH_BB_FN (bb, cfun)
1753 {
1754 edge e;
1755 edge_iterator ei;
1756
1757 /* If the current block is the destination of an abnormal edge, we
1758 kill all trapping (for PRE) and memory (for hoist) expressions
1759 because we won't be able to properly place the instruction on
1760 the edge. So make them neither anticipatable nor transparent.
1761 This is fairly conservative.
1762
1763 ??? For hoisting it may be necessary to check for set-and-jump
1764 instructions here, not just for abnormal edges. The general problem
1765 is that when an expression cannot be placed right at the end of
1766 a basic block we should account for any side-effects of a subsequent
1767 jump instruction that could clobber the expression. It would
1768 be best to implement this check along the lines of
1769 should_hoist_expr_to_dom where the target block is already known
1770 and, hence, there's no need to conservatively prune expressions on
1771 "intermediate" set-and-jump instructions. */
1772 FOR_EACH_EDGE (e, ei, bb->preds)
1773 if ((e->flags & EDGE_ABNORMAL)
1774 && (pre_p || CALL_P (BB_END (e->src))))
1775 {
1776 bitmap_and_compl (antloc[bb->index],
1777 antloc[bb->index], prune_exprs);
1778 bitmap_and_compl (transp[bb->index],
1779 transp[bb->index], prune_exprs);
1780 break;
1781 }
1782 }
1783 }
1784
1785 /* It may be necessary to insert a large number of insns on edges to
1786 make the existing occurrences of expressions fully redundant. This
1787 routine examines the set of insertions and deletions and if the ratio
1788 of insertions to deletions is too high for a particular expression, then
1789 the expression is removed from the insertion/deletion sets.
1790
1791 N_ELEMS is the number of elements in the hash table. */
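
/* Worked example with made-up numbers: an expression that would need
   45 edge insertions to make 2 existing evaluations redundant has a
   ratio of 45 / 2 = 22 in the integer arithmetic used below; if
   MAX_GCSE_INSERTION_RATIO is 20, the expression is pruned from both
   maps, since 22 > 20.  */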
1792
1793 static void
1794 prune_insertions_deletions (int n_elems)
1795 {
1796 sbitmap_iterator sbi;
1797
1798 /* We always use I to iterate over blocks/edges and J to iterate over
1799 expressions. */
1800 unsigned int i, j;
1801
1802 /* Counts for the number of times an expression needs to be inserted and
1803 number of times an expression can be removed as a result. */
1804 int *insertions = GCNEWVEC (int, n_elems);
1805 int *deletions = GCNEWVEC (int, n_elems);
1806
1807 /* Set of expressions which require too many insertions relative to
1808 the number of deletions achieved. We will prune these out of the
1809 insertion/deletion sets. */
1810 auto_sbitmap prune_exprs (n_elems);
1811 bitmap_clear (prune_exprs);
1812
1813 /* Iterate over the edges counting the number of times each expression
1814 needs to be inserted. */
1815 for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1816 {
1817 EXECUTE_IF_SET_IN_BITMAP (pre_insert_map[i], 0, j, sbi)
1818 insertions[j]++;
1819 }
1820
1821 /* Similarly for deletions, but those occur in blocks rather than on
1822 edges. */
1823 for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1824 {
1825 EXECUTE_IF_SET_IN_BITMAP (pre_delete_map[i], 0, j, sbi)
1826 deletions[j]++;
1827 }
1828
1829 /* Now that we have accurate counts, iterate over the elements in the
1830 hash table and see if any need too many insertions relative to the
1831 number of evaluations that can be removed. If so, mark them in
1832 PRUNE_EXPRS. */
1833 for (j = 0; j < (unsigned) n_elems; j++)
1834 if (deletions[j]
1835 && ((unsigned) insertions[j] / deletions[j]) > MAX_GCSE_INSERTION_RATIO)
1836 bitmap_set_bit (prune_exprs, j);
1837
1838 /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS. */
1839 EXECUTE_IF_SET_IN_BITMAP (prune_exprs, 0, j, sbi)
1840 {
1841 for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1842 bitmap_clear_bit (pre_insert_map[i], j);
1843
1844 for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1845 bitmap_clear_bit (pre_delete_map[i], j);
1846 }
1847
1848 free (insertions);
1849 free (deletions);
1850 }
1851
1852 /* Top level routine to do the dataflow analysis needed by PRE. */
1853
1854 static struct edge_list *
1855 compute_pre_data (void)
1856 {
1857 struct edge_list *edge_list;
1858 basic_block bb;
1859
1860 compute_local_properties (transp, comp, antloc, &expr_hash_table);
1861 prune_expressions (true);
1862 bitmap_vector_clear (ae_kill, last_basic_block_for_fn (cfun));
1863
1864 /* Compute ae_kill for each basic block using:
1865
1866 ~(TRANSP | COMP)
1867 */
1868
1869 FOR_EACH_BB_FN (bb, cfun)
1870 {
1871 bitmap_ior (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
1872 bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
1873 }
1874
1875 edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
1876 ae_kill, &pre_insert_map, &pre_delete_map);
1877 sbitmap_vector_free (antloc);
1878 antloc = NULL;
1879 sbitmap_vector_free (ae_kill);
1880 ae_kill = NULL;
1881
1882 prune_insertions_deletions (expr_hash_table.n_elems);
1883
1884 return edge_list;
1885 }
1886 \f
1887 /* PRE utilities */
1888
1889 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
1890 block BB.
1891
1892 VISITED is a pointer to a working buffer for tracking which BB's have
1893 been visited. It is NULL for the top-level call.
1894
1895 We treat reaching expressions that go through blocks containing the same
1896 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
1897 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
1898 2 as not reaching. The intent is to improve the probability of finding
1899 only one reaching expression and to reduce register lifetimes by picking
1900 the closest such expression. */
1901
1902 static int
1903 pre_expr_reaches_here_p_work (basic_block occr_bb, struct gcse_expr *expr,
1904 basic_block bb, char *visited)
1905 {
1906 edge pred;
1907 edge_iterator ei;
1908
1909 FOR_EACH_EDGE (pred, ei, bb->preds)
1910 {
1911 basic_block pred_bb = pred->src;
1912
1913 if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1914 /* Has this predecessor already been visited? */
1915 || visited[pred_bb->index])
1916 ;/* Nothing to do. */
1917
1918 /* Does this predecessor generate this expression? */
1919 else if (bitmap_bit_p (comp[pred_bb->index], expr->bitmap_index))
1920 {
1921 /* Is this the occurrence we're looking for?
1922 Note that there's only one generating occurrence per block
1923 so we just need to check the block number. */
1924 if (occr_bb == pred_bb)
1925 return 1;
1926
1927 visited[pred_bb->index] = 1;
1928 }
1929 /* Ignore this predecessor if it kills the expression. */
1930 else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
1931 visited[pred_bb->index] = 1;
1932
1933 /* Neither gen nor kill. */
1934 else
1935 {
1936 visited[pred_bb->index] = 1;
1937 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
1938 return 1;
1939 }
1940 }
1941
1942 /* All paths have been checked. */
1943 return 0;
1944 }
1945
1946 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
1947 memory allocated for that function is freed. */
1948
1949 static int
1950 pre_expr_reaches_here_p (basic_block occr_bb, struct gcse_expr *expr, basic_block bb)
1951 {
1952 int rval;
1953 char *visited = XCNEWVEC (char, last_basic_block_for_fn (cfun));
1954
1955 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
1956
1957 free (visited);
1958 return rval;
1959 }
1960 \f
1961 /* Generate RTL to copy an EXPR to its `reaching_reg' and return it. */
1962
1963 static rtx_insn *
1964 process_insert_insn (struct gcse_expr *expr)
1965 {
1966 rtx reg = expr->reaching_reg;
1967 /* Copy the expression to make sure we don't have any sharing issues. */
1968 rtx exp = copy_rtx (expr->expr);
1969 rtx_insn *pat;
1970
1971 start_sequence ();
1972
1973 /* If the expression is something that's an operand, like a constant,
1974 just copy it to a register. */
1975 if (general_operand (exp, GET_MODE (reg)))
1976 emit_move_insn (reg, exp);
1977
1978 /* Otherwise, make a new insn to compute this expression and make sure the
1979 insn will be recognized (this also adds any needed CLOBBERs). */
1980 else
1981 {
1982 rtx_insn *insn = emit_insn (gen_rtx_SET (reg, exp));
1983
1984 if (insn_invalid_p (insn, false))
1985 gcc_unreachable ();
1986 }
1987
1988 pat = get_insns ();
1989 end_sequence ();
1990
1991 return pat;
1992 }
1993
1994 /* Add EXPR to the end of basic block BB.
1995
1996 This is used by both PRE and code hoisting. */
1997
1998 static void
1999 insert_insn_end_basic_block (struct gcse_expr *expr, basic_block bb)
2000 {
2001 rtx_insn *insn = BB_END (bb);
2002 rtx_insn *new_insn;
2003 rtx reg = expr->reaching_reg;
2004 int regno = REGNO (reg);
2005 rtx_insn *pat, *pat_end;
2006
2007 pat = process_insert_insn (expr);
2008 gcc_assert (pat && INSN_P (pat));
2009
2010 pat_end = pat;
2011 while (NEXT_INSN (pat_end) != NULL_RTX)
2012 pat_end = NEXT_INSN (pat_end);
2013
2014 /* If the last insn is a jump, insert EXPR in front [taking care to
2015 handle cc0, etc. properly].  Similarly we need to take care of
2016 trapping instructions in the presence of non-call exceptions. */
2017
2018 if (JUMP_P (insn)
2019 || (NONJUMP_INSN_P (insn)
2020 && (!single_succ_p (bb)
2021 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
2022 {
2023 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
2024 if cc0 isn't set. */
2025 if (HAVE_cc0)
2026 {
2027 rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
2028 if (note)
2029 insn = safe_as_a <rtx_insn *> (XEXP (note, 0));
2030 else
2031 {
2032 rtx_insn *maybe_cc0_setter = prev_nonnote_insn (insn);
2033 if (maybe_cc0_setter
2034 && INSN_P (maybe_cc0_setter)
2035 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
2036 insn = maybe_cc0_setter;
2037 }
2038 }
2039
2040 /* FIXME: What if something in cc0/jump uses value set in new insn? */
2041 new_insn = emit_insn_before_noloc (pat, insn, bb);
2042 }
2043
2044 /* Likewise if the last insn is a call, as will happen in the presence
2045 of exception handling. */
2046 else if (CALL_P (insn)
2047 && (!single_succ_p (bb)
2048 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
2049 {
2050 /* Keeping in mind targets with small register classes and parameters
2051 in registers, we search backward and place the instructions before
2052 the first parameter is loaded.  Do this on all targets for consistency,
2053 and on the presumption that we'll get better code elsewhere as well. */
2054
2055 /* Since different machines initialize their parameter registers
2056 in different orders, assume nothing. Collect the set of all
2057 parameter registers. */
2058 insn = find_first_parameter_load (insn, BB_HEAD (bb));
2059
2060 /* If we found all the parameter loads, then we want to insert
2061 before the first parameter load.
2062
2063 If we did not find all the parameter loads, then we might have
2064 stopped on the head of the block, which could be a CODE_LABEL.
2065 If we inserted before the CODE_LABEL, then we would be putting
2066 the insn in the wrong basic block. In that case, put the insn
2067 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
2068 while (LABEL_P (insn)
2069 || NOTE_INSN_BASIC_BLOCK_P (insn))
2070 insn = NEXT_INSN (insn);
2071
2072 new_insn = emit_insn_before_noloc (pat, insn, bb);
2073 }
2074 else
2075 new_insn = emit_insn_after_noloc (pat, insn, bb);
2076
2077 while (1)
2078 {
2079 if (INSN_P (pat))
2080 add_label_notes (PATTERN (pat), new_insn);
2081 if (pat == pat_end)
2082 break;
2083 pat = NEXT_INSN (pat);
2084 }
2085
2086 gcse_create_count++;
2087
2088 if (dump_file)
2089 {
2090 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
2091 bb->index, INSN_UID (new_insn));
2092 fprintf (dump_file, "copying expression %d to reg %d\n",
2093 expr->bitmap_index, regno);
2094 }
2095 }
2096
2097 /* Insert partially redundant expressions on edges in the CFG to make
2098 the expressions fully redundant. */
2099
2100 static int
2101 pre_edge_insert (struct edge_list *edge_list, struct gcse_expr **index_map)
2102 {
2103 int e, i, j, num_edges, set_size, did_insert = 0;
2104 sbitmap *inserted;
2105
2106 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
2107 if it reaches any of the deleted expressions. */
2108
2109 set_size = pre_insert_map[0]->size;
2110 num_edges = NUM_EDGES (edge_list);
2111 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
2112 bitmap_vector_clear (inserted, num_edges);
2113
2114 for (e = 0; e < num_edges; e++)
2115 {
2116 int indx;
2117 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
2118
2119 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
2120 {
2121 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
2122
2123 for (j = indx;
2124 insert && j < (int) expr_hash_table.n_elems;
2125 j++, insert >>= 1)
2126 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
2127 {
2128 struct gcse_expr *expr = index_map[j];
2129 struct gcse_occr *occr;
2130
2131 /* Now look at each deleted occurrence of this expression. */
2132 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2133 {
2134 if (! occr->deleted_p)
2135 continue;
2136
2137 /* Insert this expression on this edge if it would
2138 reach the deleted occurrence in BB. */
2139 if (!bitmap_bit_p (inserted[e], j))
2140 {
2141 rtx_insn *insn;
2142 edge eg = INDEX_EDGE (edge_list, e);
2143
2144 /* We can't insert anything on an abnormal and
2145 critical edge, so we insert the insn at the end of
2146 the previous block. There are several alternatives
2147 detailed in Morgan's book, p. 277 (sec. 10.5), for
2148 handling this situation. This one is easiest for
2149 now. */
2150
2151 if (eg->flags & EDGE_ABNORMAL)
2152 insert_insn_end_basic_block (index_map[j], bb);
2153 else
2154 {
2155 insn = process_insert_insn (index_map[j]);
2156 insert_insn_on_edge (insn, eg);
2157 }
2158
2159 if (dump_file)
2160 {
2161 fprintf (dump_file, "PRE: edge (%d,%d), ",
2162 bb->index,
2163 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
2164 fprintf (dump_file, "copy expression %d\n",
2165 expr->bitmap_index);
2166 }
2167
2168 update_ld_motion_stores (expr);
2169 bitmap_set_bit (inserted[e], j);
2170 did_insert = 1;
2171 gcse_create_count++;
2172 }
2173 }
2174 }
2175 }
2176 }
2177
2178 sbitmap_vector_free (inserted);
2179 return did_insert;
2180 }
2181
2182 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
2183 Given "old_reg <- expr" (INSN), instead of adding after it
2184 reaching_reg <- old_reg
2185 it's better to do the following:
2186 reaching_reg <- expr
2187 old_reg <- reaching_reg
2188 because this way copy propagation can discover additional PRE
2189 opportunities. But if this fails, we try the old way.
2190 When "expr" is a store, i.e.
2191 given "MEM <- old_reg", instead of adding after it
2192 reaching_reg <- old_reg
2193 it's better to add it before as follows:
2194 reaching_reg <- old_reg
2195 MEM <- reaching_reg. */
2196
2197 static void
2198 pre_insert_copy_insn (struct gcse_expr *expr, rtx_insn *insn)
2199 {
2200 rtx reg = expr->reaching_reg;
2201 int regno = REGNO (reg);
2202 int indx = expr->bitmap_index;
2203 rtx pat = PATTERN (insn);
2204 rtx set, first_set;
2205 rtx_insn *new_insn;
2206 rtx old_reg;
2207 int i;
2208
2209 /* This block matches the logic in hash_scan_insn. */
2210 switch (GET_CODE (pat))
2211 {
2212 case SET:
2213 set = pat;
2214 break;
2215
2216 case PARALLEL:
2217 /* Search through the parallel looking for the set whose
2218 source was the expression that we're interested in. */
2219 first_set = NULL_RTX;
2220 set = NULL_RTX;
2221 for (i = 0; i < XVECLEN (pat, 0); i++)
2222 {
2223 rtx x = XVECEXP (pat, 0, i);
2224 if (GET_CODE (x) == SET)
2225 {
2226 /* If the source was a REG_EQUAL or REG_EQUIV note, we
2227 may not find an equivalent expression, but in this
2228 case the PARALLEL will have a single set. */
2229 if (first_set == NULL_RTX)
2230 first_set = x;
2231 if (expr_equiv_p (SET_SRC (x), expr->expr))
2232 {
2233 set = x;
2234 break;
2235 }
2236 }
2237 }
2238
2239 gcc_assert (first_set);
2240 if (set == NULL_RTX)
2241 set = first_set;
2242 break;
2243
2244 default:
2245 gcc_unreachable ();
2246 }
2247
2248 if (REG_P (SET_DEST (set)))
2249 {
2250 old_reg = SET_DEST (set);
2251 /* Check if we can modify the set destination in the original insn. */
2252 if (validate_change (insn, &SET_DEST (set), reg, 0))
2253 {
2254 new_insn = gen_move_insn (old_reg, reg);
2255 new_insn = emit_insn_after (new_insn, insn);
2256 }
2257 else
2258 {
2259 new_insn = gen_move_insn (reg, old_reg);
2260 new_insn = emit_insn_after (new_insn, insn);
2261 }
2262 }
2263 else /* This is possible only in case of a store to memory. */
2264 {
2265 old_reg = SET_SRC (set);
2266 new_insn = gen_move_insn (reg, old_reg);
2267
2268 /* Check if we can modify the set source in the original insn. */
2269 if (validate_change (insn, &SET_SRC (set), reg, 0))
2270 new_insn = emit_insn_before (new_insn, insn);
2271 else
2272 new_insn = emit_insn_after (new_insn, insn);
2273 }
2274
2275 gcse_create_count++;
2276
2277 if (dump_file)
2278 fprintf (dump_file,
2279 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
2280 BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
2281 INSN_UID (insn), regno);
2282 }
2283
2284 /* Copy available expressions that reach the redundant expression
2285 to `reaching_reg'. */
2286
2287 static void
2288 pre_insert_copies (void)
2289 {
2290 unsigned int i, added_copy;
2291 struct gcse_expr *expr;
2292 struct gcse_occr *occr;
2293 struct gcse_occr *avail;
2294
2295 /* For each available expression in the table, copy the result to
2296 `reaching_reg' if the expression reaches a deleted one.
2297
2298 ??? The current algorithm is rather brute force.
2299 Need to do some profiling. */
2300
2301 for (i = 0; i < expr_hash_table.size; i++)
2302 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2303 {
2304 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
2305 we don't want to insert a copy here because the expression may not
2306 really be redundant. So only insert an insn if the expression was
2307 deleted. This test also avoids further processing if the
2308 expression wasn't deleted anywhere. */
2309 if (expr->reaching_reg == NULL)
2310 continue;
2311
2312 /* Set when we add a copy for that expression. */
2313 added_copy = 0;
2314
2315 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2316 {
2317 if (! occr->deleted_p)
2318 continue;
2319
2320 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
2321 {
2322 rtx_insn *insn = avail->insn;
2323
2324 /* No need to handle this one if handled already. */
2325 if (avail->copied_p)
2326 continue;
2327
2328 /* Don't handle this one if it's a redundant one. */
2329 if (insn->deleted ())
2330 continue;
2331
2332 /* Or if the expression doesn't reach the deleted one. */
2333 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
2334 expr,
2335 BLOCK_FOR_INSN (occr->insn)))
2336 continue;
2337
2338 added_copy = 1;
2339
2340 /* Copy the result of avail to reaching_reg. */
2341 pre_insert_copy_insn (expr, insn);
2342 avail->copied_p = 1;
2343 }
2344 }
2345
2346 if (added_copy)
2347 update_ld_motion_stores (expr);
2348 }
2349 }
2350
2351 struct set_data
2352 {
2353 rtx_insn *insn;
2354 const_rtx set;
2355 int nsets;
2356 };
2357
2358 /* Increment number of sets and record set in DATA. */
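
/* For instance (hypothetical pattern): a divmod insn

     (parallel [(set (reg:SI 100) (div:SI (reg:SI 101) (reg:SI 102)))
                (set (reg:SI 103) (mod:SI (reg:SI 101) (reg:SI 102)))])

   carrying a REG_UNUSED note for (reg:SI 103) counts as a single-set
   insn here: the dead MOD set is skipped and only the DIV set is
   recorded in DATA.  */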
2359
2360 static void
2361 record_set_data (rtx dest, const_rtx set, void *data)
2362 {
2363 struct set_data *s = (struct set_data *)data;
2364
2365 if (GET_CODE (set) == SET)
2366 {
2367 /* We treat insns having multiple sets, where all but one are
2368 dead, as single set insns.  In the common case only a single
2369 set is present, so we want to avoid checking for REG_UNUSED
2370 notes unless necessary. */
2371 if (s->nsets == 1
2372 && find_reg_note (s->insn, REG_UNUSED, SET_DEST (s->set))
2373 && !side_effects_p (s->set))
2374 s->nsets = 0;
2375
2376 if (!s->nsets)
2377 {
2378 /* Record this set. */
2379 s->nsets += 1;
2380 s->set = set;
2381 }
2382 else if (!find_reg_note (s->insn, REG_UNUSED, dest)
2383 || side_effects_p (set))
2384 s->nsets += 1;
2385 }
2386 }
2387
2388 static const_rtx
2389 single_set_gcse (rtx_insn *insn)
2390 {
2391 struct set_data s;
2392 rtx pattern;
2393
2394 gcc_assert (INSN_P (insn));
2395
2396 /* Optimize common case. */
2397 pattern = PATTERN (insn);
2398 if (GET_CODE (pattern) == SET)
2399 return pattern;
2400
2401 s.insn = insn;
2402 s.nsets = 0;
2403 note_stores (pattern, record_set_data, &s);
2404
2405 /* Insns considered invariant have exactly one set. */
2406 gcc_assert (s.nsets == 1);
2407 return s.set;
2408 }
2409
2410 /* Emit move from SRC to DEST noting the equivalence with expression computed
2411 in INSN. */
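
/* For example (hypothetical RTL): when pre_delete replaces

     (set (reg 110) (plus (reg 100) (reg 101)))

   by a copy from the reaching register, say (set (reg 110) (reg 120)),
   the copy receives a REG_EQUAL note for (plus (reg 100) (reg 101)),
   so a later CSE pass still knows which value REG 110 holds.  */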
2412
2413 static rtx_insn *
2414 gcse_emit_move_after (rtx dest, rtx src, rtx_insn *insn)
2415 {
2416 rtx_insn *new_rtx;
2417 const_rtx set = single_set_gcse (insn);
2418 rtx set2;
2419 rtx note;
2420 rtx eqv = NULL_RTX;
2421
2422 /* This should never fail since we're creating a reg->reg copy
2423 we've verified to be valid. */
2424
2425 new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
2426
2427 /* Note the equivalence for local CSE pass. Take the note from the old
2428 set if there was one. Otherwise record the SET_SRC from the old set
2429 unless DEST is also an operand of the SET_SRC. */
2430 set2 = single_set (new_rtx);
2431 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
2432 return new_rtx;
2433 if ((note = find_reg_equal_equiv_note (insn)))
2434 eqv = XEXP (note, 0);
2435 else if (! REG_P (dest)
2436 || ! reg_mentioned_p (dest, SET_SRC (set)))
2437 eqv = SET_SRC (set);
2438
2439 if (eqv != NULL_RTX)
2440 set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
2441
2442 return new_rtx;
2443 }
2444
2445 /* Delete redundant computations.
2446 Deletion is done by changing the insn to copy the `reaching_reg' of
2447 the expression into the result of the SET. It is left to later passes
2448 to propagate the copy or eliminate it.
2449
2450 Return nonzero if a change is made. */
2451
2452 static int
2453 pre_delete (void)
2454 {
2455 unsigned int i;
2456 int changed;
2457 struct gcse_expr *expr;
2458 struct gcse_occr *occr;
2459
2460 changed = 0;
2461 for (i = 0; i < expr_hash_table.size; i++)
2462 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2463 {
2464 int indx = expr->bitmap_index;
2465
2466 /* We only need to search antic_occr since we require ANTLOC != 0. */
2467 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2468 {
2469 rtx_insn *insn = occr->insn;
2470 rtx set;
2471 basic_block bb = BLOCK_FOR_INSN (insn);
2472
2473 /* We only delete insns that have a single_set. */
2474 if (bitmap_bit_p (pre_delete_map[bb->index], indx)
2475 && (set = single_set (insn)) != 0
2476 && dbg_cnt (pre_insn))
2477 {
2478 /* Create a pseudo-reg to store the result of reaching
2479 expressions into. Get the mode for the new pseudo from
2480 the mode of the original destination pseudo. */
2481 if (expr->reaching_reg == NULL)
2482 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
2483
2484 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg, insn);
2485 delete_insn (insn);
2486 occr->deleted_p = 1;
2487 changed = 1;
2488 gcse_subst_count++;
2489
2490 if (dump_file)
2491 {
2492 fprintf (dump_file,
2493 "PRE: redundant insn %d (expression %d) in ",
2494 INSN_UID (insn), indx);
2495 fprintf (dump_file, "bb %d, reaching reg is %d\n",
2496 bb->index, REGNO (expr->reaching_reg));
2497 }
2498 }
2499 }
2500 }
2501
2502 return changed;
2503 }
2504
2505 /* Perform GCSE optimizations using PRE.
2506 This is called by one_pre_gcse_pass after all the dataflow analysis
2507 has been done.
2508
2509 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
2510 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
2511 Compiler Design and Implementation.
2512
2513 ??? A new pseudo reg is created to hold the reaching expression. The nice
2514 thing about the classical approach is that it would try to use an existing
2515 reg. If the register can't be adequately optimized [i.e. we introduce
2516 reload problems], one could add a pass here to propagate the new register
2517 through the block.
2518
2519 ??? We don't handle single sets in PARALLELs because we're [currently] not
2520 able to copy the rest of the parallel when we insert copies to create full
2521 redundancies from partial redundancies. However, there's no reason why we
2522 can't handle PARALLELs in the cases where there are no partial
2523 redundancies. */
2524
2525 static int
2526 pre_gcse (struct edge_list *edge_list)
2527 {
2528 unsigned int i;
2529 int did_insert, changed;
2530 struct gcse_expr **index_map;
2531 struct gcse_expr *expr;
2532
2533 /* Compute a mapping from expression number (`bitmap_index') to
2534 hash table entry. */
2535
2536 index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
2537 for (i = 0; i < expr_hash_table.size; i++)
2538 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2539 index_map[expr->bitmap_index] = expr;
2540
2541 /* Delete the redundant insns first so that
2542 - we know what register to use for the new insns and for the other
2543 ones with reaching expressions
2544 - we know which insns are redundant when we go to create copies */
2545
2546 changed = pre_delete ();
2547 did_insert = pre_edge_insert (edge_list, index_map);
2548
2549 /* In other places with reaching expressions, copy the expression to the
2550 specially allocated pseudo-reg that reaches the redundant expr. */
2551 pre_insert_copies ();
2552 if (did_insert)
2553 {
2554 commit_edge_insertions ();
2555 changed = 1;
2556 }
2557
2558 free (index_map);
2559 return changed;
2560 }
2561
2562 /* Top level routine to perform one PRE GCSE pass.
2563
2564 Return nonzero if a change was made. */
2565
2566 static int
2567 one_pre_gcse_pass (void)
2568 {
2569 int changed = 0;
2570
2571 gcse_subst_count = 0;
2572 gcse_create_count = 0;
2573
2574 /* Return if there's nothing to do, or it is too expensive. */
2575 if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
2576 || gcse_or_cprop_is_too_expensive (_("PRE disabled")))
2577 return 0;
2578
2579 /* We need alias. */
2580 init_alias_analysis ();
2581
2582 bytes_used = 0;
2583 gcc_obstack_init (&gcse_obstack);
2584 alloc_gcse_mem ();
2585
2586 alloc_hash_table (&expr_hash_table);
2587 add_noreturn_fake_exit_edges ();
2588 if (flag_gcse_lm)
2589 compute_ld_motion_mems ();
2590
2591 compute_hash_table (&expr_hash_table);
2592 if (flag_gcse_lm)
2593 trim_ld_motion_mems ();
2594 if (dump_file)
2595 dump_hash_table (dump_file, "Expression", &expr_hash_table);
2596
2597 if (expr_hash_table.n_elems > 0)
2598 {
2599 struct edge_list *edge_list;
2600 alloc_pre_mem (last_basic_block_for_fn (cfun), expr_hash_table.n_elems);
2601 edge_list = compute_pre_data ();
2602 changed |= pre_gcse (edge_list);
2603 free_edge_list (edge_list);
2604 free_pre_mem ();
2605 }
2606
2607 if (flag_gcse_lm)
2608 free_ld_motion_mems ();
2609 remove_fake_exit_edges ();
2610 free_hash_table (&expr_hash_table);
2611
2612 free_gcse_mem ();
2613 obstack_free (&gcse_obstack, NULL);
2614
2615 /* We are finished with alias. */
2616 end_alias_analysis ();
2617
2618 if (dump_file)
2619 {
2620 fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
2621 current_function_name (), n_basic_blocks_for_fn (cfun),
2622 bytes_used);
2623 fprintf (dump_file, "%d substs, %d insns created\n",
2624 gcse_subst_count, gcse_create_count);
2625 }
2626
2627 return changed;
2628 }
2629 \f
2630 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
2631 to INSN. If such notes are added to an insn which references a
2632 CODE_LABEL, the LABEL_NUSES count is incremented. We have to add
2633 these notes, because the following loop optimization pass requires
2634 them. */
2635
2636 /* ??? If there was a jump optimization pass after gcse and before loop,
2637 then we would not need to do this here, because jump would add the
2638 necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes. */
2639
2640 static void
2641 add_label_notes (rtx x, rtx_insn *insn)
2642 {
2643 enum rtx_code code = GET_CODE (x);
2644 int i, j;
2645 const char *fmt;
2646
2647 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2648 {
2649 /* This code used to ignore labels that referred to dispatch tables to
2650 avoid flow generating (slightly) worse code.
2651
2652 We no longer ignore such label references (see LABEL_REF handling in
2653 mark_jump_label for additional information). */
2654
2655 /* There's no reason for current users to emit jump-insns with
2656 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
2657 notes. */
2658 gcc_assert (!JUMP_P (insn));
2659 add_reg_note (insn, REG_LABEL_OPERAND, label_ref_label (x));
2660
2661 if (LABEL_P (label_ref_label (x)))
2662 LABEL_NUSES (label_ref_label (x))++;
2663
2664 return;
2665 }
2666
2667 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2668 {
2669 if (fmt[i] == 'e')
2670 add_label_notes (XEXP (x, i), insn);
2671 else if (fmt[i] == 'E')
2672 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2673 add_label_notes (XVECEXP (x, i, j), insn);
2674 }
2675 }
2676
2677 /* Code Hoisting variables and subroutines. */
2678
2679 /* Very busy expressions. */
2680 static sbitmap *hoist_vbein;
2681 static sbitmap *hoist_vbeout;
2682
2683 /* ??? We could compute post dominators and run this algorithm in
2684 reverse to perform tail merging; doing so would probably be
2685 more effective than the tail merging code in jump.c.
2686
2687 It's unclear if tail merging could be run in parallel with
2688 code hoisting. It would be nice. */
2689
2690 /* Allocate vars used for code hoisting analysis. */
2691
2692 static void
2693 alloc_code_hoist_mem (int n_blocks, int n_exprs)
2694 {
2695 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
2696 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
2697 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
2698
2699 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
2700 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
2701 }
2702
2703 /* Free vars used for code hoisting analysis. */
2704
2705 static void
2706 free_code_hoist_mem (void)
2707 {
2708 sbitmap_vector_free (antloc);
2709 sbitmap_vector_free (transp);
2710 sbitmap_vector_free (comp);
2711
2712 sbitmap_vector_free (hoist_vbein);
2713 sbitmap_vector_free (hoist_vbeout);
2714
2715 free_dominance_info (CDI_DOMINATORS);
2716 }
2717
2718 /* Compute the very busy expressions at entry/exit from each block.
2719
2720 An expression is very busy if all paths from a given point
2721 compute the expression. */
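
/* Informally, the fixed point computed below satisfies

     VBEOUT(BB) = (intersection of VBEIN over BB's successors)
                  | COMP(BB)
     VBEIN(BB)  = ANTLOC(BB) | (VBEOUT(BB) & TRANSP(BB))

   with VBEOUT left empty for the block laid out just before the
   exit block.  */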
2722
2723 static void
2724 compute_code_hoist_vbeinout (void)
2725 {
2726 int changed, passes;
2727 basic_block bb;
2728
2729 bitmap_vector_clear (hoist_vbeout, last_basic_block_for_fn (cfun));
2730 bitmap_vector_clear (hoist_vbein, last_basic_block_for_fn (cfun));
2731
2732 passes = 0;
2733 changed = 1;
2734
2735 while (changed)
2736 {
2737 changed = 0;
2738
2739 /* We scan the blocks in reverse order to speed up
2740 convergence. */
2741 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2742 {
2743 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
2744 {
2745 bitmap_intersection_of_succs (hoist_vbeout[bb->index],
2746 hoist_vbein, bb);
2747
2748 /* Include expressions in VBEout that are calculated
2749 in BB and available at its end. */
2750 bitmap_ior (hoist_vbeout[bb->index],
2751 hoist_vbeout[bb->index], comp[bb->index]);
2752 }
2753
2754 changed |= bitmap_or_and (hoist_vbein[bb->index],
2755 antloc[bb->index],
2756 hoist_vbeout[bb->index],
2757 transp[bb->index]);
2758 }
2759
2760 passes++;
2761 }
2762
2763 if (dump_file)
2764 {
2765 fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
2766
2767 FOR_EACH_BB_FN (bb, cfun)
2768 {
2769 fprintf (dump_file, "vbein (%d): ", bb->index);
2770 dump_bitmap_file (dump_file, hoist_vbein[bb->index]);
2771 fprintf (dump_file, "vbeout(%d): ", bb->index);
2772 dump_bitmap_file (dump_file, hoist_vbeout[bb->index]);
2773 }
2774 }
2775 }
2776
2777 /* Top level routine to do the dataflow analysis needed by code hoisting. */
2778
2779 static void
2780 compute_code_hoist_data (void)
2781 {
2782 compute_local_properties (transp, comp, antloc, &expr_hash_table);
2783 prune_expressions (false);
2784 compute_code_hoist_vbeinout ();
2785 calculate_dominance_info (CDI_DOMINATORS);
2786 if (dump_file)
2787 fprintf (dump_file, "\n");
2788 }
2789
2790 /* Update register pressure for BB when hoisting an expression from
2791 instruction FROM, if the live ranges of its inputs are shrunk.  Also
2792 maintain live_in information if the live range of a register
2793 referred to in FROM is shrunk.
2794
2795 Return 0 if register pressure doesn't change, otherwise return
2796 the number by which register pressure is decreased.
2797
2798 NOTE: Register pressure won't be increased in this function. */
2799
2800 static int
2801 update_bb_reg_pressure (basic_block bb, rtx_insn *from)
2802 {
2803 rtx dreg;
2804 rtx_insn *insn;
2805 basic_block succ_bb;
2806 df_ref use, op_ref;
2807 edge succ;
2808 edge_iterator ei;
2809 int decreased_pressure = 0;
2810 int nregs;
2811 enum reg_class pressure_class;
2812
2813 FOR_EACH_INSN_USE (use, from)
2814 {
2815 dreg = DF_REF_REAL_REG (use);
2816 /* The live range of the register is shrunk only if it isn't:
2817 1. referred to on any path from the end of this block to EXIT, or
2818 2. referred to by insns other than FROM in this block. */
2819 FOR_EACH_EDGE (succ, ei, bb->succs)
2820 {
2821 succ_bb = succ->dest;
2822 if (succ_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2823 continue;
2824
2825 if (bitmap_bit_p (BB_DATA (succ_bb)->live_in, REGNO (dreg)))
2826 break;
2827 }
2828 if (succ != NULL)
2829 continue;
2830
2831 op_ref = DF_REG_USE_CHAIN (REGNO (dreg));
2832 for (; op_ref; op_ref = DF_REF_NEXT_REG (op_ref))
2833 {
2834 if (!DF_REF_INSN_INFO (op_ref))
2835 continue;
2836
2837 insn = DF_REF_INSN (op_ref);
2838 if (BLOCK_FOR_INSN (insn) == bb
2839 && NONDEBUG_INSN_P (insn) && insn != from)
2840 break;
2841 }
2842
2843 pressure_class = get_regno_pressure_class (REGNO (dreg), &nregs);
2844 /* Decrease register pressure and update live_in information for
2845 this block. */
2846 if (!op_ref && pressure_class != NO_REGS)
2847 {
2848 decreased_pressure += nregs;
2849 BB_DATA (bb)->max_reg_pressure[pressure_class] -= nregs;
2850 bitmap_clear_bit (BB_DATA (bb)->live_in, REGNO (dreg));
2851 }
2852 }
2853 return decreased_pressure;
2854 }
2855
2856 /* Determine if the expression EXPR should be hoisted to EXPR_BB up in
2857 the flow graph, if it can reach BB unimpaired.  Stop the search if the
2858 expression would need to be moved more than DISTANCE instructions.
2859
2860 DISTANCE is the number of instructions through which EXPR can be
2861 hoisted up in the flow graph.
2862
2863 BB_SIZE points to an array which contains the number of instructions
2864 for each basic block.
2865
2866 PRESSURE_CLASS and NREGS are register class and number of hard registers
2867 for storing EXPR.
2868
2869 HOISTED_BBS points to a bitmap indicating basic blocks through which
2870 EXPR is hoisted.
2871
2872 FROM is the instruction from which EXPR is hoisted.
2873
2874 It's unclear exactly what Muchnick meant by "unimpaired". It seems
2875 to me that the expression must either be computed or transparent in
2876 *every* block in the path(s) from EXPR_BB to BB. Any other definition
2877 would allow the expression to be hoisted out of loops, even if
2878 the expression wasn't a loop invariant.
2879
2880 Contrast this to reachability for PRE where an expression is
2881 considered reachable if *any* path reaches instead of *all*
2882 paths. */
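
/* A hypothetical illustration of the contrast:

        expr_bb
         /   \
       b1     b2      EXPR transparent in b1, killed in b2
         \   /
          bb          EXPR computed in bb

   For PRE the computation in EXPR_BB reaches BB, because the path
   through b1 suffices.  This predicate instead fails: the walk up
   from BB reaches b2, where EXPR is not transparent, so along that
   path a value computed in EXPR_BB would be stale by the time BB is
   reached.  */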
2883
2884 static int
2885 should_hoist_expr_to_dom (basic_block expr_bb, struct gcse_expr *expr,
2886 basic_block bb, sbitmap visited, int distance,
2887 int *bb_size, enum reg_class pressure_class,
2888 int *nregs, bitmap hoisted_bbs, rtx_insn *from)
2889 {
2890 unsigned int i;
2891 edge pred;
2892 edge_iterator ei;
2893 sbitmap_iterator sbi;
2894 int visited_allocated_locally = 0;
2895 int decreased_pressure = 0;
2896
2897 if (flag_ira_hoist_pressure)
2898 {
2899 /* Record old information of basic block BB when it is visited
2900 for the first time. */
2901 if (!bitmap_bit_p (hoisted_bbs, bb->index))
2902 {
2903 struct bb_data *data = BB_DATA (bb);
2904 bitmap_copy (data->backup, data->live_in);
2905 data->old_pressure = data->max_reg_pressure[pressure_class];
2906 }
2907 decreased_pressure = update_bb_reg_pressure (bb, from);
2908 }
2909 /* Terminate the search if the distance, over which EXPR is allowed
2910 to move, is exhausted. */
2911 if (distance > 0)
2912 {
2913 if (flag_ira_hoist_pressure)
2914 {
2915 /* Prefer to hoist EXPR if register pressure is decreased. */
2916 if (decreased_pressure > *nregs)
2917 distance += bb_size[bb->index];
2918 /* Let EXPR be hoisted through the basic block at no cost if
2919 one of the following conditions is satisfied:
2920
2921 1. The basic block has low register pressure.
2922 2. Register pressure won't be increased after hoisting EXPR.
2923
2924 Constant expressions are handled conservatively, because
2925 hoisting constant expressions aggressively results in worse
2926 code.  This decision was made by observing CSiBE results on
2927 the ARM target, while it has no obvious effect on other
2928 targets like x86, x86_64, mips and powerpc. */
2929 else if (CONST_INT_P (expr->expr)
2930 || (BB_DATA (bb)->max_reg_pressure[pressure_class]
2931 >= ira_class_hard_regs_num[pressure_class]
2932 && decreased_pressure < *nregs))
2933 distance -= bb_size[bb->index];
2934 }
2935 else
2936 distance -= bb_size[bb->index];
2937
2938 if (distance <= 0)
2939 return 0;
2940 }
2941 else
2942 gcc_assert (distance == 0);
2943
2944 if (visited == NULL)
2945 {
2946 visited_allocated_locally = 1;
2947 visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
2948 bitmap_clear (visited);
2949 }
2950
2951 FOR_EACH_EDGE (pred, ei, bb->preds)
2952 {
2953 basic_block pred_bb = pred->src;
2954
2955 if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2956 break;
2957 else if (pred_bb == expr_bb)
2958 continue;
2959 else if (bitmap_bit_p (visited, pred_bb->index))
2960 continue;
2961 else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
2962 break;
2963 /* Not killed. */
2964 else
2965 {
2966 bitmap_set_bit (visited, pred_bb->index);
2967 if (! should_hoist_expr_to_dom (expr_bb, expr, pred_bb,
2968 visited, distance, bb_size,
2969 pressure_class, nregs,
2970 hoisted_bbs, from))
2971 break;
2972 }
2973 }
2974 if (visited_allocated_locally)
2975 {
2976 /* If EXPR can be hoisted to expr_bb, record basic blocks through
2977 which EXPR is hoisted in hoisted_bbs. */
2978 if (flag_ira_hoist_pressure && !pred)
2979 {
2980 /* Record the basic block from which EXPR is hoisted. */
2981 bitmap_set_bit (visited, bb->index);
2982 EXECUTE_IF_SET_IN_BITMAP (visited, 0, i, sbi)
2983 bitmap_set_bit (hoisted_bbs, i);
2984 }
2985 sbitmap_free (visited);
2986 }
2987
2988 return (pred == NULL);
2989 }
2990 \f
2991 /* Find occurrence in BB. */
2992
2993 static struct gcse_occr *
2994 find_occr_in_bb (struct gcse_occr *occr, basic_block bb)
2995 {
2996 /* Find the right occurrence of this expression. */
2997 while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
2998 occr = occr->next;
2999
3000 return occr;
3001 }
3002
3003 /* Actually perform code hoisting.
3004
3005 The code hoisting pass can hoist multiple computations of the same
3006 expression along dominated paths to a dominating basic block, like
3007 from b2/b3 to b1 as depicted below:
3008
3009 b1 ------
3010 /\ |
3011 / \ |
3012 bx by distance
3013 / \ |
3014 / \ |
3015 b2 b3 ------
3016
3017 Unfortunately code hoisting generally extends the live range of an
3018 output pseudo register, which increases register pressure and hurts
3019 register allocation. To address this issue, an attribute MAX_DISTANCE
3020 is computed and attached to each expression. The attribute is computed
3021 from the rtx cost of the corresponding expression and it's used to control
3022 how long the expression can be hoisted up in the flow graph.  As the
3023 expression is hoisted up in the flow graph, GCC decreases its DISTANCE
3024 and stops the hoist if DISTANCE reaches 0. Code hoisting can decrease
3025 register pressure if live ranges of inputs are shrunk.
3026
3027 Option "-fira-hoist-pressure" implements register pressure directed
3028 hoisting based on the above method. The rationale is:
3029 1. Calculate register pressure for each basic block by reusing IRA
3030 facility.
3031 2. When an expression is hoisted through one basic block, GCC checks
3032 the change of live ranges for its inputs/output.  The basic block's
3033 register pressure will be increased because of the extended live
3034 range of the output.  However, register pressure will be decreased
3035 if the live ranges of the inputs are shrunk.
3036 3. After knowing how hoisting affects register pressure, GCC prefers
3037 to hoist the expression if it can decrease register pressure, by
3038 increasing DISTANCE of the corresponding expression.
3039 4. If hoisting the expression increases register pressure, GCC checks
3040 the register pressure of the basic block and decreases DISTANCE only if
3041 that pressure is high.  In other words, the expression will be
3042 hoisted through the block at no cost if the basic block has low
3043 register pressure.
3044 5. Update register pressure information for basic blocks through
3045 which expression is hoisted. */
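
/* A small worked example of the distance bookkeeping (numbers made
   up): let EXPR have MAX_DISTANCE 8 and an occurrence 3 insns into
   DOMINATED, whose bb_size is 10.  The budget is first adjusted to
   8 + (10 - 3) = 15 because the occurrence only travels part of
   DOMINATED; should_hoist_expr_to_dom then subtracts each traversed
   block's size, starting with DOMINATED's own 10 (unless
   -fira-hoist-pressure lets a block pass for free), and gives up
   once the budget drops to zero.  */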
3046
3047 static int
3048 hoist_code (void)
3049 {
3050 basic_block bb, dominated;
3051 vec<basic_block> dom_tree_walk;
3052 unsigned int dom_tree_walk_index;
3053 vec<basic_block> domby;
3054 unsigned int i, j, k;
3055 struct gcse_expr **index_map;
3056 struct gcse_expr *expr;
3057 int *to_bb_head;
3058 int *bb_size;
3059 int changed = 0;
3060 struct bb_data *data;
3061 /* Basic blocks that have occurrences reachable from BB. */
3062 bitmap from_bbs;
3063 /* Basic blocks through which expr is hoisted. */
3064 bitmap hoisted_bbs = NULL;
3065 bitmap_iterator bi;
3066
3067 /* Compute a mapping from expression number (`bitmap_index') to
3068 hash table entry. */
3069
3070 index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
3071 for (i = 0; i < expr_hash_table.size; i++)
3072 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
3073 index_map[expr->bitmap_index] = expr;
3074
3075 /* Calculate sizes of basic blocks and note how far
3076 each instruction is from the start of its block. We then use this
3077 data to restrict the distance an expression can travel.
3078
3079 to_bb_head = XCNEWVEC (int, get_max_uid ());
3080 bb_size = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3081
3082 FOR_EACH_BB_FN (bb, cfun)
3083 {
3084 rtx_insn *insn;
3085 int to_head;
3086
3087 to_head = 0;
3088 FOR_BB_INSNS (bb, insn)
3089 {
3090 /* Don't count debug instructions to avoid them affecting
3091 hoisting decisions. */
3092 if (NONDEBUG_INSN_P (insn))
3093 to_bb_head[INSN_UID (insn)] = to_head++;
3094 }
3095
3096 bb_size[bb->index] = to_head;
3097 }
3098
3099 gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) == 1
3100 && (EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun), 0)->dest
3101 == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb));
3102
3103 from_bbs = BITMAP_ALLOC (NULL);
3104 if (flag_ira_hoist_pressure)
3105 hoisted_bbs = BITMAP_ALLOC (NULL);
3106
3107 dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
3108 ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb);
3109
3110 /* Walk over each basic block looking for potentially hoistable
3111 expressions; nothing gets hoisted from the entry block. */
3112 FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
3113 {
3114 domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);
3115
3116 if (domby.length () == 0)
3117 continue;
3118
3119 /* Examine each expression that is very busy at the exit of this
3120 block. These are the potentially hoistable expressions. */
3121 for (i = 0; i < SBITMAP_SIZE (hoist_vbeout[bb->index]); i++)
3122 {
3123 if (bitmap_bit_p (hoist_vbeout[bb->index], i))
3124 {
3125 int nregs = 0;
3126 enum reg_class pressure_class = NO_REGS;
3127 /* Current expression. */
3128 struct gcse_expr *expr = index_map[i];
3129 /* Number of occurrences of EXPR that can be hoisted to BB. */
3130 int hoistable = 0;
3131 /* Occurrences reachable from BB. */
3132 vec<occr_t> occrs_to_hoist = vNULL;
3133 /* We want to insert the expression into BB only once, so
3134 note when we've inserted it. */
3135 int insn_inserted_p;
3136 occr_t occr;
3137
3138 /* If an expression is computed in BB and is available at the end
3139 of BB, hoist all occurrences dominated by BB to BB. */
3140 if (bitmap_bit_p (comp[bb->index], i))
3141 {
3142 occr = find_occr_in_bb (expr->antic_occr, bb);
3143
3144 if (occr)
3145 {
3146 /* An occurrence might've been already deleted
3147 while processing a dominator of BB. */
3148 if (!occr->deleted_p)
3149 {
3150 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3151 hoistable++;
3152 }
3153 }
3154 else
3155 hoistable++;
3156 }
3157
3158 /* We've found a potentially hoistable expression; now
3159 we look at every block BB dominates to see if it
3160 computes the expression. */
3161 FOR_EACH_VEC_ELT (domby, j, dominated)
3162 {
3163 int max_distance;
3164
3165 /* Ignore self dominance. */
3166 if (bb == dominated)
3167 continue;
3168 /* We've found a dominated block; now see if it computes
3169 the busy expression and whether or not moving that
3170 expression to the "beginning" of that block is safe. */
3171 if (!bitmap_bit_p (antloc[dominated->index], i))
3172 continue;
3173
3174 occr = find_occr_in_bb (expr->antic_occr, dominated);
3175 gcc_assert (occr);
3176
3177 /* An occurrence might've been already deleted
3178 while processing a dominator of BB. */
3179 if (occr->deleted_p)
3180 continue;
3181 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3182
3183 max_distance = expr->max_distance;
3184 if (max_distance > 0)
3185 /* Adjust MAX_DISTANCE to account for the fact that
3186 OCCR won't have to travel all of DOMINATED, but
3187 only part of it. */
3188 max_distance += (bb_size[dominated->index]
3189 - to_bb_head[INSN_UID (occr->insn)]);
3190
3191 pressure_class = get_pressure_class_and_nregs (occr->insn,
3192 &nregs);
3193
3194 /* Note if the expression should be hoisted from the dominated
3195 block to BB if it can reach DOMINATED unimpaired.
3196
3197 Keep track of how many times this expression is hoistable
3198 from a dominated block into BB. */
3199 if (should_hoist_expr_to_dom (bb, expr, dominated, NULL,
3200 max_distance, bb_size,
3201 pressure_class, &nregs,
3202 hoisted_bbs, occr->insn))
3203 {
3204 hoistable++;
3205 occrs_to_hoist.safe_push (occr);
3206 bitmap_set_bit (from_bbs, dominated->index);
3207 }
3208 }
3209
3210 /* If we found more than one hoistable occurrence of this
3211 expression, then note it in the vector of expressions to
3212 hoist. It makes no sense to hoist things which are computed
3213 in only one BB, and doing so tends to pessimize register
3214 allocation. One could increase this value to try harder
3215 to avoid any possible code expansion due to register
3216 allocation issues; however experiments have shown that
3217 the vast majority of hoistable expressions are only movable
3218 from two successors, so raising this threshold is likely
3219 to nullify any benefit we get from code hoisting. */
3220 if (hoistable > 1 && dbg_cnt (hoist_insn))
3221 {
3222 /* If (hoistable != vec::length), then there is
3223 an occurrence of EXPR in BB itself. Don't waste
3224 time looking for LCA in this case. */
3225 if ((unsigned) hoistable == occrs_to_hoist.length ())
3226 {
3227 basic_block lca;
3228
3229 lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
3230 from_bbs);
3231 if (lca != bb)
3232 /* Punt, it's better to hoist these occurrences to
3233 LCA. */
3234 occrs_to_hoist.release ();
3235 }
3236 }
3237 else
3238 /* Punt, no point hoisting a single occurrence. */
3239 occrs_to_hoist.release ();
3240
3241 if (flag_ira_hoist_pressure
3242 && !occrs_to_hoist.is_empty ())
3243 {
3244 /* Increase register pressure of basic blocks to which
3245 expr is hoisted because of the extended live range of
3246 the output. */
3247 data = BB_DATA (bb);
3248 data->max_reg_pressure[pressure_class] += nregs;
3249 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3250 {
3251 data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
3252 data->max_reg_pressure[pressure_class] += nregs;
3253 }
3254 }
3255 else if (flag_ira_hoist_pressure)
3256 {
3257 /* Restore register pressure and live_in info for basic
3258 blocks recorded in hoisted_bbs when expr will not be
3259 hoisted. */
3260 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3261 {
3262 data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
3263 bitmap_copy (data->live_in, data->backup);
3264 data->max_reg_pressure[pressure_class]
3265 = data->old_pressure;
3266 }
3267 }
3268
3269 if (flag_ira_hoist_pressure)
3270 bitmap_clear (hoisted_bbs);
3271
3272 insn_inserted_p = 0;
3273
3274 /* Walk through occurrences of the I'th expression we want
3275 to hoist to BB and make the transformations. */
3276 FOR_EACH_VEC_ELT (occrs_to_hoist, j, occr)
3277 {
3278 rtx_insn *insn;
3279 const_rtx set;
3280
3281 gcc_assert (!occr->deleted_p);
3282
3283 insn = occr->insn;
3284 set = single_set_gcse (insn);
3285
3286 /* Create a pseudo-reg to store the result of reaching
3287 expressions into. Get the mode for the new pseudo
3288 from the mode of the original destination pseudo.
3289
3290 It is important to use new pseudos whenever we
3291 emit a set. This will allow reload to use
3292 rematerialization for such registers. */
3293 if (!insn_inserted_p)
3294 expr->reaching_reg
3295 = gen_reg_rtx_and_attrs (SET_DEST (set));
3296
3297 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg,
3298 insn);
3299 delete_insn (insn);
3300 occr->deleted_p = 1;
3301 changed = 1;
3302 gcse_subst_count++;
3303
3304 if (!insn_inserted_p)
3305 {
3306 insert_insn_end_basic_block (expr, bb);
3307 insn_inserted_p = 1;
3308 }
3309 }
3310
3311 occrs_to_hoist.release ();
3312 bitmap_clear (from_bbs);
3313 }
3314 }
3315 domby.release ();
3316 }
3317
3318 dom_tree_walk.release ();
3319 BITMAP_FREE (from_bbs);
3320 if (flag_ira_hoist_pressure)
3321 BITMAP_FREE (hoisted_bbs);
3322
3323 free (bb_size);
3324 free (to_bb_head);
3325 free (index_map);
3326
3327 return changed;
3328 }
3329
3330 /* Return pressure class and number of needed hard registers (through
3331 *NREGS) of register REGNO. */
3332 static enum reg_class
3333 get_regno_pressure_class (int regno, int *nregs)
3334 {
3335 if (regno >= FIRST_PSEUDO_REGISTER)
3336 {
3337 enum reg_class pressure_class;
3338
3339 pressure_class = reg_allocno_class (regno);
3340 pressure_class = ira_pressure_class_translate[pressure_class];
3341 *nregs
3342 = ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
3343 return pressure_class;
3344 }
3345 else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
3346 && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
3347 {
3348 *nregs = 1;
3349 return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
3350 }
3351 else
3352 {
3353 *nregs = 0;
3354 return NO_REGS;
3355 }
3356 }
3357
3358 /* Return pressure class and number of hard registers (through *NREGS)
3359 for destination of INSN. */
3360 static enum reg_class
3361 get_pressure_class_and_nregs (rtx_insn *insn, int *nregs)
3362 {
3363 rtx reg;
3364 enum reg_class pressure_class;
3365 const_rtx set = single_set_gcse (insn);
3366
3367 reg = SET_DEST (set);
3368 if (GET_CODE (reg) == SUBREG)
3369 reg = SUBREG_REG (reg);
3370 if (MEM_P (reg))
3371 {
3372 *nregs = 0;
3373 pressure_class = NO_REGS;
3374 }
3375 else
3376 {
3377 gcc_assert (REG_P (reg));
3378 pressure_class = reg_allocno_class (REGNO (reg));
3379 pressure_class = ira_pressure_class_translate[pressure_class];
3380 *nregs
3381 = ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
3382 }
3383 return pressure_class;
3384 }
3385
3386 /* Increase (if INCR_P) or decrease current register pressure for
3387 register REGNO. */
3388 static void
3389 change_pressure (int regno, bool incr_p)
3390 {
3391 int nregs;
3392 enum reg_class pressure_class;
3393
3394 pressure_class = get_regno_pressure_class (regno, &nregs);
3395 if (! incr_p)
3396 curr_reg_pressure[pressure_class] -= nregs;
3397 else
3398 {
3399 curr_reg_pressure[pressure_class] += nregs;
3400 if (BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3401 < curr_reg_pressure[pressure_class])
3402 BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3403 = curr_reg_pressure[pressure_class];
3404 }
3405 }
3406
3407 /* Calculate register pressure for each basic block by walking insns
3408 from last to first. */
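
/* The walk below is the usual backward liveness scan: starting from
   the block's live-out set, each non-debug insn first drops its full
   (non-partial, non-conditional) definitions from the live set and
   then adds its uses, calling change_pressure at every transition so
   that the per-class maximum for the block stays up to date.  */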
3409 static void
3410 calculate_bb_reg_pressure (void)
3411 {
3412 int i;
3413 unsigned int j;
3414 rtx_insn *insn;
3415 basic_block bb;
3416 bitmap curr_regs_live;
3417 bitmap_iterator bi;
3418
3419
3420 ira_setup_eliminable_regset ();
3421 curr_regs_live = BITMAP_ALLOC (&reg_obstack);
3422 FOR_EACH_BB_FN (bb, cfun)
3423 {
3424 curr_bb = bb;
3425 BB_DATA (bb)->live_in = BITMAP_ALLOC (NULL);
3426 BB_DATA (bb)->backup = BITMAP_ALLOC (NULL);
3427 bitmap_copy (BB_DATA (bb)->live_in, df_get_live_in (bb));
3428 bitmap_copy (curr_regs_live, df_get_live_out (bb));
3429 for (i = 0; i < ira_pressure_classes_num; i++)
3430 curr_reg_pressure[ira_pressure_classes[i]] = 0;
3431 EXECUTE_IF_SET_IN_BITMAP (curr_regs_live, 0, j, bi)
3432 change_pressure (j, true);
3433
3434 FOR_BB_INSNS_REVERSE (bb, insn)
3435 {
3436 rtx dreg;
3437 int regno;
3438 df_ref def, use;
3439
3440 if (! NONDEBUG_INSN_P (insn))
3441 continue;
3442
3443 FOR_EACH_INSN_DEF (def, insn)
3444 {
3445 dreg = DF_REF_REAL_REG (def);
3446 gcc_assert (REG_P (dreg));
3447 regno = REGNO (dreg);
3448 if (!(DF_REF_FLAGS (def)
3449 & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
3450 {
3451 if (bitmap_clear_bit (curr_regs_live, regno))
3452 change_pressure (regno, false);
3453 }
3454 }
3455
3456 FOR_EACH_INSN_USE (use, insn)
3457 {
3458 dreg = DF_REF_REAL_REG (use);
3459 gcc_assert (REG_P (dreg));
3460 regno = REGNO (dreg);
3461 if (bitmap_set_bit (curr_regs_live, regno))
3462 change_pressure (regno, true);
3463 }
3464 }
3465 }
3466 BITMAP_FREE (curr_regs_live);
3467
3468 if (dump_file == NULL)
3469 return;
3470
3471 fprintf (dump_file, "\nRegister Pressure: \n");
3472 FOR_EACH_BB_FN (bb, cfun)
3473 {
3474 fprintf (dump_file, " Basic block %d: \n", bb->index);
3475 for (i = 0; (int) i < ira_pressure_classes_num; i++)
3476 {
3477 enum reg_class pressure_class;
3478
3479 pressure_class = ira_pressure_classes[i];
3480 if (BB_DATA (bb)->max_reg_pressure[pressure_class] == 0)
3481 continue;
3482
3483 fprintf (dump_file, " %s=%d\n", reg_class_names[pressure_class],
3484 BB_DATA (bb)->max_reg_pressure[pressure_class]);
3485 }
3486 }
3487 fprintf (dump_file, "\n");
3488 }
3489
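/* A worked example (illustrative only): for a block whose pseudos all
   share one pressure class and which computes

     t1 = a + b;
     t2 = t1 * c;

   with only T2 live out, the reverse walk starts with pressure 1 (T2),
   the def of T2 drops it to 0 while the uses of T1 and C raise it to 2,
   and the def of T1 followed by the uses of A and B raises the recorded
   maximum to 3.  */
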
3490 /* Top level routine to perform one code hoisting (aka unification) pass.
3491
3492 Return nonzero if a change was made. */
3493
3494 static int
3495 one_code_hoisting_pass (void)
3496 {
3497 int changed = 0;
3498
3499 gcse_subst_count = 0;
3500 gcse_create_count = 0;
3501
3502 /* Return if there's nothing to do, or it is too expensive. */
3503 if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
3504 || gcse_or_cprop_is_too_expensive (_("GCSE disabled")))
3505 return 0;
3506
3507 doing_code_hoisting_p = true;
3508
3509 /* Calculate register pressure for each basic block. */
3510 if (flag_ira_hoist_pressure)
3511 {
3512 regstat_init_n_sets_and_refs ();
3513 ira_set_pseudo_classes (false, dump_file);
3514 alloc_aux_for_blocks (sizeof (struct bb_data));
3515 calculate_bb_reg_pressure ();
3516 regstat_free_n_sets_and_refs ();
3517 }
3518
3519 /* We need alias analysis. */
3520 init_alias_analysis ();
3521
3522 bytes_used = 0;
3523 gcc_obstack_init (&gcse_obstack);
3524 alloc_gcse_mem ();
3525
3526 alloc_hash_table (&expr_hash_table);
3527 compute_hash_table (&expr_hash_table);
3528 if (dump_file)
3529 dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);
3530
3531 if (expr_hash_table.n_elems > 0)
3532 {
3533 alloc_code_hoist_mem (last_basic_block_for_fn (cfun),
3534 expr_hash_table.n_elems);
3535 compute_code_hoist_data ();
3536 changed = hoist_code ();
3537 free_code_hoist_mem ();
3538 }
3539
3540 if (flag_ira_hoist_pressure)
3541 {
3542 free_aux_for_blocks ();
3543 free_reg_info ();
3544 }
3545 free_hash_table (&expr_hash_table);
3546 free_gcse_mem ();
3547 obstack_free (&gcse_obstack, NULL);
3548
3549 /* We are finished with alias analysis. */
3550 end_alias_analysis ();
3551
3552 if (dump_file)
3553 {
3554 fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
3555 current_function_name (), n_basic_blocks_for_fn (cfun),
3556 bytes_used);
3557 fprintf (dump_file, "%d substs, %d insns created\n",
3558 gcse_subst_count, gcse_create_count);
3559 }
3560
3561 doing_code_hoisting_p = false;
3562
3563 return changed;
3564 }
3565 \f
3566 /* Here we provide the things required to do store motion towards the exit.
3567 In order for this to be effective, gcse also needed to be taught how to
3568 move a load when it is killed only by a store to itself.
3569
3570 int i;
3571 float a[10];
3572
3573 void foo(float scale)
3574 {
3575 for (i=0; i<10; i++)
3576 a[i] *= scale;
3577 }
3578
3579 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
3580 the load out since it is live around the loop and is stored at the bottom
3581 of the loop.
3582
3583 The 'Load Motion' referred to and implemented in this file is
3584 an enhancement to gcse which when using edge based LCM, recognizes
3585 this situation and allows gcse to move the load out of the loop.
3586
3587 Once gcse has hoisted the load, store motion can then push this
3588 load towards the exit, and we end up with no loads or stores of 'i'
3589 in the loop. */
3590
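/* At the source level, the combined effect on the example above can be
   pictured as (an illustrative equivalent, not literal compiler output):

     void foo (float scale)
     {
       int i_tmp;
       for (i_tmp = 0; i_tmp < 10; i_tmp++)
         a[i_tmp] *= scale;
       i = i_tmp;
     }

   The load of 'i' has been hoisted into a register and the store of 'i'
   pushed past the loop exit.  */
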
3591 /* Search the ldst list for a matching expression. If one is not
3592 found, create a new entry and initialize it. */
3593
3594 static struct ls_expr *
3595 ldst_entry (rtx x)
3596 {
3597 int do_not_record_p = 0;
3598 struct ls_expr * ptr;
3599 unsigned int hash;
3600 ls_expr **slot;
3601 struct ls_expr e;
3602
3603 hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
3604 NULL, /*have_reg_qty=*/false);
3605
3606 e.pattern = x;
3607 slot = pre_ldst_table->find_slot_with_hash (&e, hash, INSERT);
3608 if (*slot)
3609 return *slot;
3610
3611 ptr = XNEW (struct ls_expr);
3612
3613 ptr->next = pre_ldst_mems;
3614 ptr->expr = NULL;
3615 ptr->pattern = x;
3616 ptr->pattern_regs = NULL_RTX;
3617 ptr->stores.create (0);
3618 ptr->reaching_reg = NULL_RTX;
3619 ptr->invalid = 0;
3620 ptr->index = 0;
3621 ptr->hash_index = hash;
3622 pre_ldst_mems = ptr;
3623 *slot = ptr;
3624
3625 return ptr;
3626 }
3627
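/* Usage sketch (illustrative): since the table hashes and compares the
   PATTERN field, two calls with equivalent MEM rtxes yield one shared
   entry:

     struct ls_expr *p1 = ldst_entry (mem1);
     struct ls_expr *p2 = ldst_entry (mem2);   // mem2 equivalent to mem1
     gcc_checking_assert (p1 == p2);

   so all loads and stores of one location are funneled through a single
   ls_expr.  */
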
3628 /* Free up an individual ldst entry. */
3629
3630 static void
3631 free_ldst_entry (struct ls_expr * ptr)
3632 {
3633 ptr->stores.release ();
3634
3635 free (ptr);
3636 }
3637
3638 /* Free up all memory associated with the ldst list. */
3639
3640 static void
3641 free_ld_motion_mems (void)
3642 {
3643 delete pre_ldst_table;
3644 pre_ldst_table = NULL;
3645
3646 while (pre_ldst_mems)
3647 {
3648 struct ls_expr * tmp = pre_ldst_mems;
3649
3650 pre_ldst_mems = pre_ldst_mems->next;
3651
3652 free_ldst_entry (tmp);
3653 }
3654
3655 pre_ldst_mems = NULL;
3656 }
3657
3658 /* Dump debugging info about the ldst list. */
3659
3660 static void
3661 print_ldst_list (FILE * file)
3662 {
3663 struct ls_expr * ptr;
3664
3665 fprintf (file, "LDST list: \n");
3666
3667 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
3668 {
3669 fprintf (file, " Pattern (%3d): ", ptr->index);
3670
3671 print_rtl (file, ptr->pattern);
3672
3673 fprintf (file, "\n Stores : ");
3674 print_rtx_insn_vec (file, ptr->stores);
3675
3676 fprintf (file, "\n\n");
3677 }
3678
3679 fprintf (file, "\n");
3680 }
3681
3682 /* Return the ldst list entry for X, or NULL if there is none or it is invalid. */
3683
3684 static struct ls_expr *
3685 find_rtx_in_ldst (rtx x)
3686 {
3687 struct ls_expr e;
3688 ls_expr **slot;
3689 if (!pre_ldst_table)
3690 return NULL;
3691 e.pattern = x;
3692 slot = pre_ldst_table->find_slot (&e, NO_INSERT);
3693 if (!slot || (*slot)->invalid)
3694 return NULL;
3695 return *slot;
3696 }
3697 \f
3698 /* Load Motion for loads which only kill themselves. */
3699
3700 /* Return nonzero if X, a MEM, is a simple access with no side effects.
3701 These are the types of loads we consider for the ld_motion list;
3702 otherwise we let the usual aliasing take care of them. */
3703
3704 static int
3705 simple_mem (const_rtx x)
3706 {
3707 if (MEM_VOLATILE_P (x))
3708 return 0;
3709
3710 if (GET_MODE (x) == BLKmode)
3711 return 0;
3712
3713 /* If we are handling exceptions, we must be careful with memory references
3714 that may trap. If we are not, the behavior is undefined, so we may just
3715 continue. */
3716 if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
3717 return 0;
3718
3719 if (side_effects_p (x))
3720 return 0;
3721
3722 /* Do not consider function arguments passed on the stack. */
3723 if (reg_mentioned_p (stack_pointer_rtx, x))
3724 return 0;
3725
3726 if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
3727 return 0;
3728
3729 return 1;
3730 }
3731
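/* For example (illustrative): (mem:SI (symbol_ref ("i"))) is simple,
   while a volatile MEM, a BLKmode MEM, or a stack-relative access such
   as (mem:SI (plus (reg sp) (const_int 4))) is rejected by the checks
   above.  */
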
3732 /* Make sure there isn't a buried reference in this pattern anywhere.
3733 If there is, invalidate the entry for it since we're not capable
3734 of fixing it up just yet. We have to be sure we know about ALL
3735 loads since the aliasing code will allow all entries in the
3736 ld_motion list to not-alias themselves. If we miss a load, we will
3737 get the wrong value since gcse might common it and we won't know to
3738 fix it up. */
3739
3740 static void
3741 invalidate_any_buried_refs (rtx x)
3742 {
3743 const char * fmt;
3744 int i, j;
3745 struct ls_expr * ptr;
3746
3747 /* Invalidate it in the list. */
3748 if (MEM_P (x) && simple_mem (x))
3749 {
3750 ptr = ldst_entry (x);
3751 ptr->invalid = 1;
3752 }
3753
3754 /* Recursively process the insn. */
3755 fmt = GET_RTX_FORMAT (GET_CODE (x));
3756
3757 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3758 {
3759 if (fmt[i] == 'e')
3760 invalidate_any_buried_refs (XEXP (x, i));
3761 else if (fmt[i] == 'E')
3762 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3763 invalidate_any_buried_refs (XVECEXP (x, i, j));
3764 }
3765 }
3766
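/* E.g. (illustrative): in

     (set (reg:SI 100) (plus:SI (mem:SI (symbol_ref ("i")))
                                (const_int 1)))

   the MEM is not the toplevel SET_SRC, so the recursive walk above
   finds it buried inside the PLUS and marks its ls_expr entry
   invalid.  */
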
3767 /* Find all the 'simple' MEMs which are used in LOADs and STORES.
3768 'Simple' means MEM loads and stores to symbols, with no side effects
3769 and no registers in the expression. For a MEM destination, we also
3770 check that the insn is still valid if we replace the destination with a
3771 REG, as is done in update_ld_motion_stores. Any uses/defs which don't
3772 match these criteria are invalidated and trimmed out
3773 later. */
3774
3775 static void
3776 compute_ld_motion_mems (void)
3777 {
3778 struct ls_expr * ptr;
3779 basic_block bb;
3780 rtx_insn *insn;
3781
3782 pre_ldst_mems = NULL;
3783 pre_ldst_table = new hash_table<pre_ldst_expr_hasher> (13);
3784
3785 FOR_EACH_BB_FN (bb, cfun)
3786 {
3787 FOR_BB_INSNS (bb, insn)
3788 {
3789 if (NONDEBUG_INSN_P (insn))
3790 {
3791 if (GET_CODE (PATTERN (insn)) == SET)
3792 {
3793 rtx src = SET_SRC (PATTERN (insn));
3794 rtx dest = SET_DEST (PATTERN (insn));
3795
3796 /* Check for a simple load. */
3797 if (MEM_P (src) && simple_mem (src))
3798 {
3799 ptr = ldst_entry (src);
3800 if (!REG_P (dest))
3801 ptr->invalid = 1;
3802 }
3803 else
3804 {
3805 /* Make sure there isn't a buried load somewhere. */
3806 invalidate_any_buried_refs (src);
3807 }
3808
3809 /* Check for a simple load through a REG_EQUAL note. */
3810 rtx note = find_reg_equal_equiv_note (insn), src_eq;
3811 if (note
3812 && REG_NOTE_KIND (note) == REG_EQUAL
3813 && (src_eq = XEXP (note, 0))
3814 && !(MEM_P (src_eq) && simple_mem (src_eq)))
3815 invalidate_any_buried_refs (src_eq);
3816
3817 /* Check for stores. Don't worry about aliased ones, they
3818 will block any movement we might do later. We only care
3819 about this exact pattern since those are the only
3820 circumstance that we will ignore the aliasing info. */
3821 if (MEM_P (dest) && simple_mem (dest))
3822 {
3823 ptr = ldst_entry (dest);
3824 machine_mode src_mode = GET_MODE (src);
3825 if (! MEM_P (src)
3826 && GET_CODE (src) != ASM_OPERANDS
3827 /* Check for REG manually since want_to_gcse_p
3828 returns 0 for all REGs. */
3829 && can_assign_to_reg_without_clobbers_p (src,
3830 src_mode))
3831 ptr->stores.safe_push (insn);
3832 else
3833 ptr->invalid = 1;
3834 }
3835 }
3836 else
3837 {
3838 /* Invalidate all MEMs in the pattern and... */
3839 invalidate_any_buried_refs (PATTERN (insn));
3840
3841 /* ...in REG_EQUAL notes for PARALLELs with single SET. */
3842 rtx note = find_reg_equal_equiv_note (insn), src_eq;
3843 if (note
3844 && REG_NOTE_KIND (note) == REG_EQUAL
3845 && (src_eq = XEXP (note, 0)))
3846 invalidate_any_buried_refs (src_eq);
3847 }
3848 }
3849 }
3850 }
3851 }
3852
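/* E.g. (illustrative): the pair

     (set (reg:SI 100) (mem:SI (symbol_ref ("i"))))   <- simple load
     (set (mem:SI (symbol_ref ("i"))) (reg:SI 100))   <- recorded store

   shares one ls_expr keyed on the MEM; a store whose SET_SRC could not
   be copied into a register (see can_assign_to_reg_without_clobbers_p)
   would mark the entry invalid instead.  */
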
3853 /* Remove any references that either have been invalidated or are not
3854 in the expression list for PRE gcse. */
3855
3856 static void
3857 trim_ld_motion_mems (void)
3858 {
3859 struct ls_expr * * last = & pre_ldst_mems;
3860 struct ls_expr * ptr = pre_ldst_mems;
3861
3862 while (ptr != NULL)
3863 {
3864 struct gcse_expr * expr;
3865
3866 /* Delete if entry has been made invalid. */
3867 if (! ptr->invalid)
3868 {
3869 /* Delete if we cannot find this mem in the expression list. */
3870 unsigned int hash = ptr->hash_index % expr_hash_table.size;
3871
3872 for (expr = expr_hash_table.table[hash];
3873 expr != NULL;
3874 expr = expr->next_same_hash)
3875 if (expr_equiv_p (expr->expr, ptr->pattern))
3876 break;
3877 }
3878 else
3879 expr = (struct gcse_expr *) 0;
3880
3881 if (expr)
3882 {
3883 /* Set the expression field if we are keeping it. */
3884 ptr->expr = expr;
3885 last = & ptr->next;
3886 ptr = ptr->next;
3887 }
3888 else
3889 {
3890 *last = ptr->next;
3891 pre_ldst_table->remove_elt_with_hash (ptr, ptr->hash_index);
3892 free_ldst_entry (ptr);
3893 ptr = * last;
3894 }
3895 }
3896
3897 /* Show the world what we've found. */
3898 if (dump_file && pre_ldst_mems != NULL)
3899 print_ldst_list (dump_file);
3900 }
3901
3902 /* Take an expression which we are replacing with a reaching
3903 register, and update any stores that are needed if that expression
3904 is in the ld_motion list. Stores are updated by copying their SRC
3905 to the reaching register, and then storing the reaching register
3906 into the store location. This keeps the correct value in the
3907 reaching register for the loads. */
3908
3909 static void
3910 update_ld_motion_stores (struct gcse_expr * expr)
3911 {
3912 struct ls_expr * mem_ptr;
3913
3914 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
3915 {
3916 /* We could try to find just the REACHED stores, but it shouldn't
3917 matter to set the reaching reg everywhere... some might be
3918 dead and should be eliminated later. */
3919
3920 /* We replace (set mem expr) with (set reg expr) (set mem reg)
3921 where reg is the reaching reg used in the load. We checked in
3922 compute_ld_motion_mems that we can replace (set mem expr) with
3923 (set reg expr) in that insn. */
3924 rtx_insn *insn;
3925 unsigned int i;
3926 FOR_EACH_VEC_ELT_REVERSE (mem_ptr->stores, i, insn)
3927 {
3928 rtx pat = PATTERN (insn);
3929 rtx src = SET_SRC (pat);
3930 rtx reg = expr->reaching_reg;
3931
3932 /* If we've already copied it, continue. */
3933 if (expr->reaching_reg == src)
3934 continue;
3935
3936 if (dump_file)
3937 {
3938 fprintf (dump_file, "PRE: store updated with reaching reg ");
3939 print_rtl (dump_file, reg);
3940 fprintf (dump_file, ":\n ");
3941 print_inline_rtx (dump_file, insn, 8);
3942 fprintf (dump_file, "\n");
3943 }
3944
3945 rtx_insn *copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
3946 emit_insn_before (copy, insn);
3947 SET_SRC (pat) = reg;
3948 df_insn_rescan (insn);
3949
3950 /* Un-recognize this pattern since it's probably different now. */
3951 INSN_CODE (insn) = -1;
3952 gcse_create_count++;
3953 }
3954 }
3955 }
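
/* Concretely (an illustrative sketch): with reaching register
   (reg:SI 200), the store

     (set (mem:SI (symbol_ref ("i"))) (plus:SI (reg:SI 100) (const_int 1)))

   is rewritten as the pair

     (set (reg:SI 200) (plus:SI (reg:SI 100) (const_int 1)))
     (set (mem:SI (symbol_ref ("i"))) (reg:SI 200))

   so the loads of 'i' redirected to (reg:SI 200) still see the correct
   value.  */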
3956 \f
3957 /* Return true if the graph is too expensive to optimize. PASS is the
3958 optimization about to be performed. */
3959
3960 bool
3961 gcse_or_cprop_is_too_expensive (const char *pass)
3962 {
3963 unsigned int memory_request = (n_basic_blocks_for_fn (cfun)
3964 * SBITMAP_SET_SIZE (max_reg_num ())
3965 * sizeof (SBITMAP_ELT_TYPE));
3966
3967 /* Trying to perform global optimizations on flow graphs which have
3968 a high connectivity will take a long time and is unlikely to be
3969 particularly useful.
3970
3971 In normal circumstances a cfg should have about twice as many
3972 edges as blocks. But we do not want to punish small functions
3973 which have a couple of switch statements. Rather than simply
3974 thresholding the number of blocks, we use something with more
3975 graceful degradation. */
3976 if (n_edges_for_fn (cfun) > 20000 + n_basic_blocks_for_fn (cfun) * 4)
3977 {
3978 warning (OPT_Wdisabled_optimization,
3979 "%s: %d basic blocks and %d edges/basic block",
3980 pass, n_basic_blocks_for_fn (cfun),
3981 n_edges_for_fn (cfun) / n_basic_blocks_for_fn (cfun));
3982
3983 return true;
3984 }
3985
3986 /* If allocating memory for the dataflow bitmaps would take up too much
3987 storage it's better just to disable the optimization. */
3988 if (memory_request > MAX_GCSE_MEMORY)
3989 {
3990 warning (OPT_Wdisabled_optimization,
3991 "%s: %d basic blocks and %d registers; increase --param max-gcse-memory above %d",
3992 pass, n_basic_blocks_for_fn (cfun), max_reg_num (),
3993 memory_request);
3994
3995 return true;
3996 }
3997
3998 return false;
3999 }
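
/* A rough worked example (assuming a 64-bit SBITMAP_ELT_TYPE): a
   function with 1000 basic blocks and 50000 registers requests about
   1000 * ceil (50000 / 64) * 8 bytes, i.e. roughly 6 MB, which is then
   compared against --param max-gcse-memory.  */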
4000 \f
4001 static unsigned int
4002 execute_rtl_pre (void)
4003 {
4004 int changed;
4005 delete_unreachable_blocks ();
4006 df_analyze ();
4007 changed = one_pre_gcse_pass ();
4008 flag_rerun_cse_after_global_opts |= changed;
4009 if (changed)
4010 cleanup_cfg (0);
4011 return 0;
4012 }
4013
4014 static unsigned int
4015 execute_rtl_hoist (void)
4016 {
4017 int changed;
4018 delete_unreachable_blocks ();
4019 df_analyze ();
4020 changed = one_code_hoisting_pass ();
4021 flag_rerun_cse_after_global_opts |= changed;
4022 if (changed)
4023 cleanup_cfg (0);
4024 return 0;
4025 }
4026
4027 namespace {
4028
4029 const pass_data pass_data_rtl_pre =
4030 {
4031 RTL_PASS, /* type */
4032 "rtl pre", /* name */
4033 OPTGROUP_NONE, /* optinfo_flags */
4034 TV_PRE, /* tv_id */
4035 PROP_cfglayout, /* properties_required */
4036 0, /* properties_provided */
4037 0, /* properties_destroyed */
4038 0, /* todo_flags_start */
4039 TODO_df_finish, /* todo_flags_finish */
4040 };
4041
4042 class pass_rtl_pre : public rtl_opt_pass
4043 {
4044 public:
4045 pass_rtl_pre (gcc::context *ctxt)
4046 : rtl_opt_pass (pass_data_rtl_pre, ctxt)
4047 {}
4048
4049 /* opt_pass methods: */
4050 virtual bool gate (function *);
4051 virtual unsigned int execute (function *) { return execute_rtl_pre (); }
4052
4053 }; // class pass_rtl_pre
4054
4055 /* We do not construct an accurate cfg in functions which call
4056 setjmp, so none of these passes runs if the function calls
4057 setjmp.
4058 FIXME: Should just handle setjmp via REG_SETJMP notes. */
4059
4060 bool
4061 pass_rtl_pre::gate (function *fun)
4062 {
4063 return optimize > 0 && flag_gcse
4064 && !fun->calls_setjmp
4065 && optimize_function_for_speed_p (fun)
4066 && dbg_cnt (pre);
4067 }
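
/* Usage note (assuming the usual -fdbg-cnt=counter:limit syntax):
   -fdbg-cnt=pre:0 makes this gate fail via the debug counter,
   disabling the pass for bisection purposes without touching
   -fgcse.  */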
4068
4069 } // anon namespace
4070
4071 rtl_opt_pass *
4072 make_pass_rtl_pre (gcc::context *ctxt)
4073 {
4074 return new pass_rtl_pre (ctxt);
4075 }
4076
4077 namespace {
4078
4079 const pass_data pass_data_rtl_hoist =
4080 {
4081 RTL_PASS, /* type */
4082 "hoist", /* name */
4083 OPTGROUP_NONE, /* optinfo_flags */
4084 TV_HOIST, /* tv_id */
4085 PROP_cfglayout, /* properties_required */
4086 0, /* properties_provided */
4087 0, /* properties_destroyed */
4088 0, /* todo_flags_start */
4089 TODO_df_finish, /* todo_flags_finish */
4090 };
4091
4092 class pass_rtl_hoist : public rtl_opt_pass
4093 {
4094 public:
4095 pass_rtl_hoist (gcc::context *ctxt)
4096 : rtl_opt_pass (pass_data_rtl_hoist, ctxt)
4097 {}
4098
4099 /* opt_pass methods: */
4100 virtual bool gate (function *);
4101 virtual unsigned int execute (function *) { return execute_rtl_hoist (); }
4102
4103 }; // class pass_rtl_hoist
4104
4105 bool
4106 pass_rtl_hoist::gate (function *)
4107 {
4108 return optimize > 0 && flag_gcse
4109 && !cfun->calls_setjmp
4110 /* It does not make sense to run code hoisting unless we are optimizing
4111 for code size -- it rarely makes programs faster, and can make them
4112 bigger if we did PRE (when optimizing for space, we don't run PRE). */
4113 && optimize_function_for_size_p (cfun)
4114 && dbg_cnt (hoist);
4115 }
4116
4117 } // anon namespace
4118
4119 rtl_opt_pass *
4120 make_pass_rtl_hoist (gcc::context *ctxt)
4121 {
4122 return new pass_rtl_hoist (ctxt);
4123 }
4124
4125 /* Reset all state within gcse.c so that we can rerun the compiler
4126 within the same process. For use by toplev::finalize. */
4127
4128 void
4129 gcse_c_finalize (void)
4130 {
4131 test_insn = NULL;
4132 }
4133
4134 #include "gt-gcse.h"