/* Partial redundancy elimination / Hoisting for RTL.
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - simulate the register pressure change of each basic block accurately
     during the hoisting process.  But I doubt the benefit, since most
     hoisted expressions are constants or addresses, which usually won't
     reduce register pressure.
   - calculate rough register pressure information and use the info to drive
     all kinds of code motion (including code hoisting) in a unified way.
*/

/* References searched while implementing this.

   Compilers Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2, Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The Power of Assignment Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global Code Motion / Global Value Numbering
   C. Click
   ACM SIGPLAN Notices, Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices, Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "toplev.h"

#include "hard-reg-set.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "ira.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "basic-block.h"
#include "function.h"
#include "expr.h"
#include "except.h"
#include "ggc.h"
#include "params.h"
#include "cselib.h"
#include "intl.h"
#include "obstack.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "df.h"
#include "dbgcnt.h"
#include "target.h"
#include "gcse.h"

/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by classic GCSE.

   Two passes of copy/constant propagation are done around PRE or hoisting
   because the first one enables more GCSE and the second one helps to clean
   up the copies that PRE and HOIST create.  This is needed more for PRE than
   for HOIST because code hoisting will try to use an existing register
   containing the common subexpression rather than create a new one.  This is
   harder to do for PRE because of the code motion (which HOIST doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found that doing copy propagation between each pass enables further
   substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Probably, multiple passes are even less efficient now than
   at the time when the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.
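
   As a small worked sketch of steps 3-5 at the source level (the pass
   itself works on RTL SETs; all names here are illustrative):

       if (cond)
         x = a + b;        // computed on one path only
       y = a + b;          // partially redundant

   becomes, after the deletion and insertion steps:

       if (cond)
         { t = a + b; x = t; }
       else
         t = a + b;        // inserted copy; the use below is now fully redundant
       y = t;              // redundant computation replaced by the new pseudo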

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CPROP pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much, because it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
\f
/* GCSE global vars.  */

struct target_gcse default_target_gcse;
#if SWITCHABLE_TARGET
struct target_gcse *this_target_gcse = &default_target_gcse;
#endif

/* Set to nonzero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

struct reg_use { rtx reg_rtx; };

/* Hash table of expressions.  */

struct expr
{
  /* The expression.  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence, and the output is not used between the start of
     the block and the occurrence.  */
  struct occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
  /* Maximum distance in instructions this expression can travel.
     We avoid moving simple expressions for more than a few instructions
     to keep register pressure under control.
     A value of "0" removes restrictions on how far the expression can
     travel.  */
  int max_distance;
};

/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once, the
   last appearance is used [or the first for anticipatable expressions].  */

struct occr
{
  /* Next occurrence of this expression.  */
  struct occr *next;
  /* The insn that computes the expression.  */
  rtx insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};

typedef struct occr *occr_t;
DEF_VEC_P (occr_t);
DEF_VEC_ALLOC_P (occr_t, heap);

/* Expression hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved, as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct hash_table_d
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;
};

/* Expression hash table.  */
static struct hash_table_d expr_hash_table;

/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by anything except itself,
   i.e. loads and stores to a single location.
   We can then allow movement of these MEM refs with a little special
   allowance.  (All stores copy the same value to the reaching reg used
   for the loads.)  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.  */
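
/* For instance (a sketch; names illustrative): if a loop body contains

       r5 = MEM[base]        ;; the only load of this location
       ...
       MEM[base] = r7        ;; the only store to it

   then both references are tracked here, each store also copies its value
   into the reaching register used for the loads, and the load becomes a
   candidate that PRE can move.  */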

struct ls_expr
{
  struct expr *expr;            /* Gcse expression reference for LM.  */
  rtx pattern;                  /* Pattern of this mem.  */
  rtx pattern_regs;             /* List of registers mentioned by the mem.  */
  rtx loads;                    /* INSN list of loads seen.  */
  rtx stores;                   /* INSN list of stores seen.  */
  struct ls_expr *next;         /* Next in the list.  */
  int invalid;                  /* Invalid for some reason.  */
  int index;                    /* If it maps to a bitmap index.  */
  unsigned int hash_index;      /* Index when in a hash table.  */
  rtx reaching_reg;             /* Register to use when re-writing.  */
};

/* Head of the list of load/store memory refs.  */
static struct ls_expr *pre_ldst_mems = NULL;

/* Hashtable for the load/store memory refs.  */
static htab_t pre_ldst_table = NULL;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number, of lists of insns which modify
   memory within the block.  */
static VEC (rtx,heap) **modify_mem_list;
static bitmap modify_mem_list_set;

typedef struct modify_pair_s
{
  rtx dest;                     /* A MEM.  */
  rtx dest_addr;                /* The canonical address of `dest'.  */
} modify_pair;

DEF_VEC_O(modify_pair);
DEF_VEC_ALLOC_O(modify_pair,heap);

/* This array parallels modify_mem_list, except that it stores MEMs
   being set and their canonicalized memory addresses.  */
static VEC (modify_pair,heap) **canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;

/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;
\f
/* Doing code hoisting.  */
static bool doing_code_hoisting_p = false;
\f
/* For available exprs.  */
static sbitmap *ae_kill;
\f
/* Data stored for each basic block.  */
struct bb_data
{
  /* Maximal register pressure inside basic block for given register class
     (defined only for the pressure classes).  */
  int max_reg_pressure[N_REG_CLASSES];
};

#define BB_DATA(bb) ((struct bb_data *) (bb)->aux)

static basic_block curr_bb;

/* Current register pressure for each pressure class.  */
static int curr_reg_pressure[N_REG_CLASSES];
\f

static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx, struct hash_table_d *);
static void hash_scan_set (rtx, rtx, struct hash_table_d *);
static void hash_scan_clobber (rtx, rtx, struct hash_table_d *);
static void hash_scan_call (rtx, rtx, struct hash_table_d *);
static int want_to_gcse_p (rtx, int *);
static int oprs_unchanged_p (const_rtx, const_rtx, int);
static int oprs_anticipatable_p (const_rtx, const_rtx);
static int oprs_available_p (const_rtx, const_rtx);
static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int, int,
                                  struct hash_table_d *);
static unsigned int hash_expr (const_rtx, enum machine_mode, int *, int);
static int expr_equiv_p (const_rtx, const_rtx);
static void record_last_reg_set_info (rtx, int);
static void record_last_mem_set_info (rtx);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct hash_table_d *);
static void alloc_hash_table (struct hash_table_d *);
static void free_hash_table (struct hash_table_d *);
static void compute_hash_table_work (struct hash_table_d *);
static void dump_hash_table (FILE *, const char *, struct hash_table_d *);
static void compute_transp (const_rtx, int, sbitmap *);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
                                      struct hash_table_d *);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void canon_list_insert (rtx, const_rtx, void *);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static struct edge_list *compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct expr *,
                                    basic_block);
static void insert_insn_end_basic_block (struct expr *, basic_block);
static void pre_insert_copy_insn (struct expr *, rtx);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (struct edge_list *);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int should_hoist_expr_to_dom (basic_block, struct expr *, basic_block,
                                     sbitmap, int, int *, enum reg_class,
                                     int *, bitmap);
static int hoist_code (void);
static enum reg_class get_pressure_class_and_nregs (rtx insn, int *nregs);
static int one_code_hoisting_pass (void);
static rtx process_insert_insn (struct expr *);
static int pre_edge_insert (struct edge_list *, struct expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
                                         basic_block, char *);
static struct ls_expr *ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ld_motion_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr *find_rtx_in_ldst (rtx);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct expr *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);
static rtx gcse_emit_move_after (rtx, rtx, rtx);
static bool is_too_expensive (const char *);

#define GNEW(T)                 ((T *) gmalloc (sizeof (T)))
#define GCNEW(T)                ((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N)           ((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N)          ((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S)           ((T *) gmalloc ((S)))
#define GCNEWVAR(T, S)          ((T *) gcalloc (1, (S)))

#define GOBNEW(T)               ((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S)         ((T *) gcse_alloc ((S)))
\f
/* Misc. utilities.  */

#define can_copy \
  (this_target_gcse->x_can_copy)
#define can_copy_init_p \
  (this_target_gcse->x_can_copy_init_p)

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg, insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
        can_copy[i] = 0;
#else
        reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
        insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
        if (recog (PATTERN (insn), insn, NULL) >= 0)
          can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}

/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (enum machine_mode mode)
{
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}
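
/* For instance: an integer mode such as SImode always reports copyable
   here, while a MODE_CC mode is copyable only if the target recognizes
   a plain reg-reg SET for it (and AVOID_CCMODE_COPIES is not defined);
   see compute_can_copy above.  */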
\f
/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}

/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = ALLOC_REG_SET (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  */
  modify_mem_list = GCNEWVEC (VEC (rtx,heap) *, last_basic_block);
  canon_modify_mem_list = GCNEWVEC (VEC (modify_pair,heap) *,
                                    last_basic_block);
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  FREE_REG_SET (reg_set_bitmap);

  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}
\f
/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and the expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and the expression would contain the same value if the
   computation was moved to the beginning of the block.

   We call this routine for PRE and code hoisting.  Both compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  */
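
/* An illustrative sketch (insn numbering hypothetical): for the
   expression 'a + b' in a block containing

       insn 1:  a = ...          ;; operand modified
       insn 2:  r5 = a + b       ;; the occurrence
       insn 3:  ... = r5

   the expression is computed (COMP), since no operand changes between
   insn 2 and the end of the block; it is not locally anticipatable
   (ANTLOC), since 'a' is set before the occurrence; and it is not
   transparent (TRANSP), since an operand is modified within the block.  */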

static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
                          struct hash_table_d *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      sbitmap_vector_ones (transp, last_basic_block);
    }

  if (comp)
    sbitmap_vector_zero (comp, last_basic_block);
  if (antloc)
    sbitmap_vector_zero (antloc, last_basic_block);

  for (i = 0; i < table->size; i++)
    {
      struct expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
        {
          int indx = expr->bitmap_index;
          struct occr *occr;

          /* The expression is transparent in this block if it is not killed.
             We start by assuming all are transparent [none are killed], and
             then reset the bits for those that are.  */
          if (transp)
            compute_transp (expr->expr, indx, transp);

          /* The occurrences recorded in antic_occr are exactly those that
             we want to set to nonzero in ANTLOC.  */
          if (antloc)
            for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
              {
                SET_BIT (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);

                /* While we're scanning the table, this is a good place to
                   initialize this.  */
                occr->deleted_p = 0;
              }

          /* The occurrences recorded in avail_occr are exactly those that
             we want to set to nonzero in COMP.  */
          if (comp)
            for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
              {
                SET_BIT (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);

                /* While we're scanning the table, this is a good place to
                   initialize this.  */
                occr->copied_p = 0;
              }

          /* While we're scanning the table, this is a good place to
             initialize this.  */
          expr->reaching_reg = 0;
        }
    }
}
\f
/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;

/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x, int *max_distance_ptr)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  /* GCSE'ing constants:

     We do not specifically distinguish between constant and non-constant
     expressions in PRE and Hoist.  We use set_src_cost below to limit
     the maximum distance simple expressions can travel.

     Nevertheless, constants are much easier to GCSE, and, hence,
     it is easy to overdo the optimizations.  Usually, excessive PRE and
     Hoisting of constants leads to increased register pressure.

     RA can deal with this by rematerializing some of the constants.
     Therefore, it is important that the back-end generates sets of constants
     in a way that allows reload to rematerialize them under high register
     pressure, i.e., a pseudo register with a REG_EQUAL note equating it to
     a constant is set only once.  Failing to do so will result in IRA/reload
     spilling such constants under high register pressure instead of
     rematerializing them.  */

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CALL:
      return 0;

    CASE_CONST_ANY:
      if (!doing_code_hoisting_p)
        /* Do not PRE constants.  */
        return 0;

      /* FALLTHRU */

    default:
      if (doing_code_hoisting_p)
        /* PRE doesn't implement max_distance restriction.  */
        {
          int cost;
          int max_distance;

          gcc_assert (!optimize_function_for_speed_p (cfun)
                      && optimize_function_for_size_p (cfun));
          cost = set_src_cost (x, 0);

          if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
            {
              max_distance = (GCSE_COST_DISTANCE_RATIO * cost) / 10;
              if (max_distance == 0)
                return 0;

              gcc_assert (max_distance > 0);
            }
          else
            max_distance = 0;

          if (max_distance_ptr)
            *max_distance_ptr = max_distance;
        }

      return can_assign_to_reg_without_clobbers_p (x);
    }
}
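
/* A worked sketch of the distance cap above (the parameter values are
   the assumed defaults and may differ with target or options): with a
   gcse-cost-distance-ratio of 10 and a source whose set_src_cost is
   COSTS_N_INSNS (2), i.e. 8, max_distance = (10 * 8) / 10 = 8 insns.
   Once the cost reaches COSTS_N_INSNS (GCSE_UNRESTRICTED_COST), the cap
   is dropped entirely (max_distance == 0 means unrestricted).  */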

/* Used internally by can_assign_to_reg_without_clobbers_p.  */

static GTY(()) rtx test_insn;

/* Return true if we can assign X to a pseudo register such that the
   resulting insn does not result in clobbering a hard register as a
   side-effect.

   Additionally, if the target requires it, check that the resulting insn
   can be copied.  If it cannot, this means that X is special and probably
   has hidden side-effects we don't want to mess with.

   This function is typically used by code motion passes, to verify
   that it is safe to insert an insn without worrying about clobbering
   maybe live hard regs.  */

bool
can_assign_to_reg_without_clobbers_p (rtx x)
{
  int num_clobbers = 0;
  int icode;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return 1;
  else if (GET_MODE (x) == VOIDmode)
    return 0;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
        = make_insn_raw (gen_rtx_SET (VOIDmode,
                                      gen_rtx_REG (word_mode,
                                                   FIRST_PSEUDO_REGISTER * 2),
                                      const0_rtx));
      NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (test_insn)) = x;

  icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);
  if (icode < 0)
    return false;

  if (num_clobbers > 0 && added_clobbers_hard_reg_p (icode))
    return false;

  if (targetm.cannot_copy_insn_p && targetm.cannot_copy_insn_p (test_insn))
    return false;

  return true;
}
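
/* Example (sketch): a source like (plus:SI (reg:SI 70) (reg:SI 71))
   typically recognizes cleanly and passes, while a source whose
   recognition forces an added hard-register clobber (e.g. a
   condition-code register on some targets) is rejected, since inserting
   it at an arbitrary point could clobber a live hard reg.  */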

/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (const_rtx x, const_rtx insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
        struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

        if (info->last_bb != current_bb)
          return 1;
        if (avail_p)
          return info->last_set < DF_INSN_LUID (insn);
        else
          return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
                                  x, avail_p))
        return 0;
      else
        return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0: /*FIXME*/
    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          /* If we are about to do the last recursive call needed at this
             level, change it into iteration.  This function is called enough
             to be worth it.  */
          if (i == 0)
            return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

          else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
            return 0;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
            return 0;
    }

  return 1;
}

/* Info passed from load_killed_in_block_p to mems_conflict_for_gcse_p.  */

struct mem_conflict_info
{
  /* A memory reference for a load instruction; mems_conflict_for_gcse_p will
     see if a memory store conflicts with this memory load.  */
  const_rtx mem;

  /* True if mems_conflict_for_gcse_p finds a conflict between two memory
     references.  */
  bool conflict;
};

/* DEST is the output of an instruction.  If it is a memory reference and
   possibly conflicts with the load found in DATA, then communicate this
   information back through DATA.  */

static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
                          void *data)
{
  struct mem_conflict_info *mci = (struct mem_conflict_info *) data;

  while (GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT
         || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */
  if (pre_ldst_mems != NULL && expr_equiv_p (dest, mci->mem))
    {
      if (!find_rtx_in_ldst (dest))
        mci->conflict = true;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), mci->mem))
    mci->conflict = true;
}

/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */
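
/* For instance, the MEM case of oprs_unchanged_p above calls
   load_killed_in_block_p (current_bb, DF_INSN_LUID (insn), x, avail_p):
   with AVAIL_P nonzero it asks whether the load is clobbered between
   INSN and the end of the block, and with AVAIL_P zero whether it is
   clobbered between the start of the block and INSN.  */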

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
                        int avail_p)
{
  VEC (rtx,heap) *list = modify_mem_list[bb->index];
  rtx setter;
  unsigned ix;

  /* If this is a read-only MEM, then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  FOR_EACH_VEC_ELT_REVERSE (rtx, list, ix, setter)
    {
      struct mem_conflict_info mci;

      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
           && DF_INSN_LUID (setter) < uid_limit)
          || (! avail_p
              && DF_INSN_LUID (setter) > uid_limit))
        continue;

      /* If SETTER is a call everything is clobbered.  Note that calls
         to pure functions are never put on the list, so we need not
         worry about them.  */
      if (CALL_P (setter))
        return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
         note_stores to examine each hunk of memory that is modified.  */
      mci.mem = x;
      mci.conflict = false;
      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, &mci);
      if (mci.conflict)
        return 1;
    }
  return 0;
}

/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 1);
}

/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, enum machine_mode mode, int *do_not_record_p,
           int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p, NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}
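
/* E.g. (sketch): with a 251-bucket table, an expression whose hash_rtx
   value is 1000003 lands in bucket 1000003 % 251 == 19.  */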

/* Return nonzero if exp1 is equivalent to exp2.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}

/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.

   MAX_DISTANCE is the maximum distance in instructions this expression can
   be moved.  */

static void
insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
                      int avail_p, int max_distance, struct hash_table_d *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
         the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct expr);
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
        /* This is the first pattern that hashed to this index.  */
        table->table[hash] = cur_expr;
      else
        /* Add EXPR to end of this hash chain.  */
        last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      gcc_assert (max_distance >= 0);
      cur_expr->max_distance = max_distance;
    }
  else
    gcc_assert (cur_expr->max_distance == max_distance);

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr
          && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
        antic_occr = NULL;

      if (antic_occr)
        /* Found another instance of the expression in the same basic block.
           Prefer the currently recorded one.  We want the first one in the
           block and the block is scanned from start to end.  */
        ; /* nothing to do */
      else
        {
          /* First occurrence of this expression in this basic block.  */
          antic_occr = GOBNEW (struct occr);
          bytes_used += sizeof (struct occr);
          antic_occr->insn = insn;
          antic_occr->next = cur_expr->antic_occr;
          antic_occr->deleted_p = 0;
          cur_expr->antic_occr = antic_occr;
        }
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr
          && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
        {
          /* Found another instance of the expression in the same basic block.
             Prefer this occurrence to the currently recorded one.  We want
             the last one in the block and the block is scanned from start
             to end.  */
          avail_occr->insn = insn;
        }
      else
        {
          /* First occurrence of this expression in this basic block.  */
          avail_occr = GOBNEW (struct occr);
          bytes_used += sizeof (struct occr);
          avail_occr->insn = insn;
          avail_occr->next = cur_expr->avail_occr;
          avail_occr->deleted_p = 0;
          cur_expr->avail_occr = avail_occr;
        }
    }
}

/* Scan SET present in INSN and add an entry to the hash TABLE.  */

static void
hash_scan_set (rtx set, rtx insn, struct hash_table_d *table)
{
  rtx src = SET_SRC (set);
  rtx dest = SET_DEST (set);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      int max_distance = 0;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

         This allows us to do a single GCSE pass and still eliminate
         redundant constants, addresses or other expressions that are
         constructed with multiple instructions.

         However, keep the original SRC if INSN is a simple reg-reg move.
         In this case, there will almost always be a REG_EQUAL note on the
         insn that sets SRC.  By recording the REG_EQUAL value here as SRC
         for INSN, we miss copy propagation opportunities and we perform the
         same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
         do more than one PRE GCSE pass.

         Note that this does not impede profitable constant propagations.  We
         "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
          && REG_NOTE_KIND (note) == REG_EQUAL
          && !REG_P (src)
          && want_to_gcse_p (XEXP (note, 0), NULL))
        src = XEXP (note, 0), set = gen_rtx_SET (VOIDmode, dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
          /* Don't GCSE something if we can't do a reg/reg copy.  */
          && can_copy_p (GET_MODE (dest))
          /* GCSE commonly inserts instruction after the insn.  We can't
             do that easily for EH edges so disable GCSE on these for now.  */
          /* ??? We can now easily create new EH landing pads at the
             gimple level, for splitting edges; there's no reason we
             can't do the same thing at the rtl level.  */
          && !can_throw_internal (insn)
          /* Is SET_SRC something we want to gcse?  */
          && want_to_gcse_p (src, &max_distance)
          /* Don't CSE a nop.  */
          && ! set_noop_p (set)
          /* Don't GCSE if it has attached REG_EQUIV note.
             At this point only function parameters should have
             REG_EQUIV notes, and if the argument slot is used somewhere
             explicitly, it means the address of the parameter has been
             taken, so we should not extend the lifetime of the pseudo.  */
          && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
        {
          /* An expression is not anticipatable if its operands are
             modified before this insn or if this is not the only SET in
             this insn.  The latter condition does not have to mean that
             SRC itself is not anticipatable, but we just will not be
             able to handle code motion of insns with multiple sets.  */
          int antic_p = oprs_anticipatable_p (src, insn)
                        && !multiple_sets (insn);
          /* An expression is not available if its operands are
             subsequently modified, including this insn.  It's also not
             available if this is a branch, because we can't insert
             a set after the branch.  */
          int avail_p = (oprs_available_p (src, insn)
                         && ! JUMP_P (insn));

          insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
                                max_distance, table);
        }
    }
  /* In case of store we want to consider the memory value as available in
     the REG stored in that memory.  This makes it possible to remove
     redundant loads due to stores to the same location.  */
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);
      int max_distance = 0;

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
          /* Don't GCSE something if we can't do a reg/reg copy.  */
          && can_copy_p (GET_MODE (src))
          /* GCSE commonly inserts instruction after the insn.  We can't
             do that easily for EH edges so disable GCSE on these for now.  */
          && !can_throw_internal (insn)
          /* Is SET_DEST something we want to gcse?  */
          && want_to_gcse_p (dest, &max_distance)
          /* Don't CSE a nop.  */
          && ! set_noop_p (set)
          /* Don't GCSE if it has attached REG_EQUIV note.
             At this point only function parameters should have
             REG_EQUIV notes, and if the argument slot is used somewhere
             explicitly, it means the address of the parameter has been
             taken, so we should not extend the lifetime of the pseudo.  */
          && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
              || ! MEM_P (XEXP (note, 0))))
        {
          /* Stores are never anticipatable.  */
          int antic_p = 0;
          /* An expression is not available if its operands are
             subsequently modified, including this insn.  It's also not
             available if this is a branch, because we can't insert
             a set after the branch.  */
          int avail_p = oprs_available_p (dest, insn)
                        && ! JUMP_P (insn);

          /* Record the memory expression (DEST) in the hash table.  */
          insert_expr_in_table (dest, GET_MODE (dest), insn,
                                antic_p, avail_p, max_distance, table);
        }
    }
}

static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
                   struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
                struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

/* Process INSN and add hash table entries as appropriate.  */

static void
hash_scan_insn (rtx insn, struct hash_table_d *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);

  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);

  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
        rtx x = XVECEXP (pat, 0, i);

        if (GET_CODE (x) == SET)
          hash_scan_set (x, insn, table);
        else if (GET_CODE (x) == CLOBBER)
          hash_scan_clobber (x, insn, table);
        else if (GET_CODE (x) == CALL)
          hash_scan_call (x, insn, table);
      }
}

/* Dump the hash table TABLE to file FILE under the name NAME.  */

static void
dump_hash_table (FILE *file, const char *name, struct hash_table_d *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct expr **flat_table;
  unsigned int *hash_val;
  struct expr *expr;

  flat_table = XCNEWVEC (struct expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
        flat_table[expr->bitmap_index] = expr;
        hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
           name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
        expr = flat_table[i];
        fprintf (file, "Index %d (hash value %d; max distance %d)\n  ",
                 expr->bitmap_index, hash_val[i], expr->max_distance);
        print_rtl (file, expr->expr);
        fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}

/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */

static void
record_last_reg_set_info (rtx insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}

/* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
   Note we store a pair of elements in the list, so they have to be
   taken off pairwise.  */

static void
canon_list_insert (rtx dest ATTRIBUTE_UNUSED, const_rtx x ATTRIBUTE_UNUSED,
                   void *v_insn)
{
  rtx dest_addr, insn;
  int bb;
  modify_pair pair;

  while (GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT
         || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with a load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */

  if (! MEM_P (dest))
    return;

  dest_addr = get_addr (XEXP (dest, 0));
  dest_addr = canon_rtx (dest_addr);
  insn = (rtx) v_insn;
  bb = BLOCK_FOR_INSN (insn)->index;

  pair.dest = dest;
  pair.dest_addr = dest_addr;
  VEC_safe_push (modify_pair, heap, canon_modify_mem_list[bb], pair);
}

/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set
   (consider a CALL_INSN).  We merely need to record which insns modify
   memory.  */

static void
record_last_mem_set_info (rtx insn)
{
  int bb = BLOCK_FOR_INSN (insn)->index;

  /* load_killed_in_block_p will handle the case of calls clobbering
     everything.  */
  VEC_safe_push (rtx, heap, modify_mem_list[bb], insn);
  bitmap_set_bit (modify_mem_list_set, bb);

  if (CALL_P (insn))
    bitmap_set_bit (blocks_with_calls, bb);
  else
    note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
}

/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx last_set_insn = (rtx) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
           /* Ignore pushes, they clobber nothing.  */
           && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}

/* Top level function to create an expression hash table.

   Expression entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform GCSE on,
   - none of the operands are subsequently modified in the block.

   Currently src must be a pseudo-reg or a const_int.

   TABLE is the table computed.  */

static void
compute_hash_table_work (struct hash_table_d *table)
{
  int i;

  /* Re-cache any INSN_LIST nodes we have allocated.  */
  clear_modify_mem_tables ();
  /* Some working arrays used to track first and last set in each block.  */
  reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());

  for (i = 0; i < max_reg_num (); ++i)
    reg_avail_info[i].last_bb = NULL;

  FOR_EACH_BB (current_bb)
    {
      rtx insn;
      unsigned int regno;

      /* First pass over the instructions records information used to
         determine when registers and memory are first and last set.  */
      FOR_BB_INSNS (current_bb, insn)
        {
          if (!NONDEBUG_INSN_P (insn))
            continue;

          if (CALL_P (insn))
            {
              hard_reg_set_iterator hrsi;
              EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call,
                                              0, regno, hrsi)
                record_last_reg_set_info (insn, regno);

              if (! RTL_CONST_OR_PURE_CALL_P (insn))
                record_last_mem_set_info (insn);
            }

          note_stores (PATTERN (insn), record_last_set_info, insn);
        }

      /* The next pass builds the hash table.  */
      FOR_BB_INSNS (current_bb, insn)
        if (NONDEBUG_INSN_P (insn))
          hash_scan_insn (insn, table);
    }

  free (reg_avail_info);
  reg_avail_info = NULL;
}

/* Allocate space for the set/expr hash TABLE.
   The function's insn count is used to determine the number of buckets.  */

static void
alloc_hash_table (struct hash_table_d *table)
{
  int n;

  n = get_max_insn_count ();

  table->size = n / 4;
  if (table->size < 11)
    table->size = 11;

  /* Attempt to maintain efficient use of hash table.
     Making it an odd number is simplest for now.
     ??? Later take some measurements.  */
  table->size |= 1;
  n = table->size * sizeof (struct expr *);
  table->table = GNEWVAR (struct expr *, n);
}
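
/* E.g. (sketch): a function with 1000 insns gets 1000 / 4 = 250 buckets,
   forced odd to 251; a tiny function is clamped to the minimum of 11.  */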

/* Free things allocated by alloc_hash_table.  */

static void
free_hash_table (struct hash_table_d *table)
{
  free (table->table);
}

/* Compute the expression hash table TABLE.  */

static void
compute_hash_table (struct hash_table_d *table)
{
  /* Initialize count of number of entries in hash table.  */
  table->n_elems = 0;
  memset (table->table, 0, table->size * sizeof (struct expr *));

  compute_hash_table_work (table);
}
\f
/* Expression tracking support.  */

/* Clear canon_modify_mem_list and modify_mem_list tables.  */

static void
clear_modify_mem_tables (void)
{
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
    {
      VEC_free (rtx, heap, modify_mem_list[i]);
      VEC_free (modify_pair, heap, canon_modify_mem_list[i]);
    }
  bitmap_clear (modify_mem_list_set);
  bitmap_clear (blocks_with_calls);
}

/* Release memory used by modify_mem_list_set.  */

static void
free_modify_mem_tables (void)
{
  clear_modify_mem_tables ();
  free (modify_mem_list);
  free (canon_modify_mem_list);
  modify_mem_list = 0;
  canon_modify_mem_list = 0;
}
\f
/* For each block, compute whether X is transparent.  X is either an
   expression or an assignment [though we don't care which, for this context
   an assignment is treated as an expression].  For each block where an
   element of X is modified, reset the INDX bit in BMAP.  */

static void
compute_transp (const_rtx x, int indx, sbitmap *bmap)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  /* repeat is used to turn tail-recursion into iteration since GCC
     can't do it when there's no return value.  */
 repeat:

  if (x == 0)
    return;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
        df_ref def;
        for (def = DF_REG_DEF_CHAIN (REGNO (x));
             def;
             def = DF_REF_NEXT_REG (def))
          RESET_BIT (bmap[DF_REF_BB (def)->index], indx);
      }

      return;

    case MEM:
      if (! MEM_READONLY_P (x))
        {
          bitmap_iterator bi;
          unsigned bb_index;

          /* First handle all the blocks with calls.  We don't need to
             do any list walking for them.  */
          EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
            {
              RESET_BIT (bmap[bb_index], indx);
            }

          /* Now iterate over the blocks which have memory modifications
             but which do not have any calls.  */
          EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
                                          blocks_with_calls,
                                          0, bb_index, bi)
            {
              VEC (modify_pair,heap) *list
                = canon_modify_mem_list[bb_index];
              modify_pair *pair;
              unsigned ix;

              FOR_EACH_VEC_ELT_REVERSE (modify_pair, list, ix, pair)
                {
                  rtx dest = pair->dest;
                  rtx dest_addr = pair->dest_addr;

                  if (canon_true_dependence (dest, GET_MODE (dest),
                                             dest_addr, x, NULL_RTX))
                    RESET_BIT (bmap[bb_index], indx);
                }
            }
        }

      x = XEXP (x, 0);
      goto repeat;

    case PC:
    case CC0: /*FIXME*/
    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          /* If we are about to do the last recursive call
             needed at this level, change it into iteration.
             This function is called enough to be worth it.  */
          if (i == 0)
            {
              x = XEXP (x, i);
              goto repeat;
            }

          compute_transp (XEXP (x, i), indx, bmap);
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          compute_transp (XVECEXP (x, i, j), indx, bmap);
    }
}
1743 \f
1744 /* Compute PRE+LCM working variables. */
1745
1746 /* Local properties of expressions. */
1747
1748 /* Nonzero for expressions that are transparent in the block. */
1749 static sbitmap *transp;
1750
1751 /* Nonzero for expressions that are computed (available) in the block. */
1752 static sbitmap *comp;
1753
1754 /* Nonzero for expressions that are locally anticipatable in the block. */
1755 static sbitmap *antloc;
1756
1757 /* Nonzero for expressions where this block is an optimal computation
1758 point. */
1759 static sbitmap *pre_optimal;
1760
1761 /* Nonzero for expressions which are redundant in a particular block. */
1762 static sbitmap *pre_redundant;
1763
1764 /* Nonzero for expressions which should be inserted on a specific edge. */
1765 static sbitmap *pre_insert_map;
1766
1767 /* Nonzero for expressions which should be deleted in a specific block. */
1768 static sbitmap *pre_delete_map;
1769
1770 /* Allocate vars used for PRE analysis. */
1771
1772 static void
1773 alloc_pre_mem (int n_blocks, int n_exprs)
1774 {
1775 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
1776 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
1777 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
1778
1779 pre_optimal = NULL;
1780 pre_redundant = NULL;
1781 pre_insert_map = NULL;
1782 pre_delete_map = NULL;
1783 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
1784
1785 /* pre_insert and pre_delete are allocated later. */
1786 }
1787
1788 /* Free vars used for PRE analysis. */
1789
1790 static void
1791 free_pre_mem (void)
1792 {
1793 sbitmap_vector_free (transp);
1794 sbitmap_vector_free (comp);
1795
1796 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
1797
1798 if (pre_optimal)
1799 sbitmap_vector_free (pre_optimal);
1800 if (pre_redundant)
1801 sbitmap_vector_free (pre_redundant);
1802 if (pre_insert_map)
1803 sbitmap_vector_free (pre_insert_map);
1804 if (pre_delete_map)
1805 sbitmap_vector_free (pre_delete_map);
1806
1807 transp = comp = NULL;
1808 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
1809 }
1810
1811 /* Remove certain expressions from anticipatable and transparent
1812 sets of basic blocks that have an incoming abnormal edge.
1813 For PRE remove potentially trapping expressions to avoid placing
1814 them on abnormal edges. For hoisting remove memory references that
1815 can be clobbered by calls. */
1816
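/* For instance: a division may trap, so for PRE it must not end up
   on an abnormal edge; and a non-constant MEM may be clobbered by
   the call ending a block with an abnormal successor, so for
   hoisting it is pruned as well.  */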
1817 static void
1818 prune_expressions (bool pre_p)
1819 {
1820 sbitmap prune_exprs;
1821 struct expr *expr;
1822 unsigned int ui;
1823 basic_block bb;
1824
1825 prune_exprs = sbitmap_alloc (expr_hash_table.n_elems);
1826 sbitmap_zero (prune_exprs);
1827 for (ui = 0; ui < expr_hash_table.size; ui++)
1828 {
1829 for (expr = expr_hash_table.table[ui]; expr; expr = expr->next_same_hash)
1830 {
1831 /* Note potentially trapping expressions. */
1832 if (may_trap_p (expr->expr))
1833 {
1834 SET_BIT (prune_exprs, expr->bitmap_index);
1835 continue;
1836 }
1837
1838 if (!pre_p && MEM_P (expr->expr))
1839 /* Note memory references that can be clobbered by a call.
1840 We do not split abnormal edges in hoisting, so if a
1841 memory reference were hoisted along an abnormal edge,
1842 it would be placed /before/ the call. Therefore, only
1843 constant memory references can be hoisted along abnormal
1844 edges. */
1845 {
1846 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
1847 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
1848 continue;
1849
1850 if (MEM_READONLY_P (expr->expr)
1851 && !MEM_VOLATILE_P (expr->expr)
1852 && MEM_NOTRAP_P (expr->expr))
1853 /* Constant memory reference, e.g., a PIC address. */
1854 continue;
1855
1856 /* ??? Optimally, we would use interprocedural alias
1857 analysis to determine if this mem is actually killed
1858 by this call. */
1859
1860 SET_BIT (prune_exprs, expr->bitmap_index);
1861 }
1862 }
1863 }
1864
1865 FOR_EACH_BB (bb)
1866 {
1867 edge e;
1868 edge_iterator ei;
1869
1870 /* If the current block is the destination of an abnormal edge, we
1871 kill all trapping (for PRE) and memory (for hoist) expressions
1872 because we won't be able to properly place the instruction on
1873 the edge. So make them neither anticipatable nor transparent.
1874 This is fairly conservative.
1875
1876 ??? For hoisting it may be necessary to check for set-and-jump
1877 instructions here, not just for abnormal edges. The general problem
1878 is that when an expression cannot be placed right at the end of
1879 a basic block, we should account for any side-effects of subsequent
1880 jump instructions that could clobber the expression. It would
1881 be best to implement this check along the lines of
1882 should_hoist_expr_to_dom where the target block is already known
1883 and, hence, there's no need to conservatively prune expressions on
1884 "intermediate" set-and-jump instructions. */
1885 FOR_EACH_EDGE (e, ei, bb->preds)
1886 if ((e->flags & EDGE_ABNORMAL)
1887 && (pre_p || CALL_P (BB_END (e->src))))
1888 {
1889 sbitmap_difference (antloc[bb->index],
1890 antloc[bb->index], prune_exprs);
1891 sbitmap_difference (transp[bb->index],
1892 transp[bb->index], prune_exprs);
1893 break;
1894 }
1895 }
1896
1897 sbitmap_free (prune_exprs);
1898 }
1899
1900 /* It may be necessary to insert a large number of insns on edges to
1901 make the existing occurrences of expressions fully redundant. This
1902 routine examines the set of insertions and deletions and if the ratio
1903 of insertions to deletions is too high for a particular expression, then
1904 the expression is removed from the insertion/deletion sets.
1905
1906 N_ELEMS is the number of elements in the hash table. */
1907
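/* A worked example with hypothetical numbers: if
   MAX_GCSE_INSERTION_RATIO is 20, an expression needing 64 edge
   insertions to delete 3 occurrences has ratio 64 / 3 = 21 > 20 and
   is pruned, while one needing 10 insertions for 2 deletions
   (ratio 5) is kept.  */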
1908 static void
1909 prune_insertions_deletions (int n_elems)
1910 {
1911 sbitmap_iterator sbi;
1912 sbitmap prune_exprs;
1913
1914 /* We always use I to iterate over blocks/edges and J to iterate over
1915 expressions. */
1916 unsigned int i, j;
1917
1918 /* Counts for the number of times an expression needs to be inserted and
1919 number of times an expression can be removed as a result. */
1920 int *insertions = GCNEWVEC (int, n_elems);
1921 int *deletions = GCNEWVEC (int, n_elems);
1922
1923 /* Set of expressions which require too many insertions relative to
1924 the number of deletions achieved. We will prune these out of the
1925 insertion/deletion sets. */
1926 prune_exprs = sbitmap_alloc (n_elems);
1927 sbitmap_zero (prune_exprs);
1928
1929 /* Iterate over the edges counting the number of times each expression
1930 needs to be inserted. */
1931 for (i = 0; i < (unsigned) n_edges; i++)
1932 {
1933 EXECUTE_IF_SET_IN_SBITMAP (pre_insert_map[i], 0, j, sbi)
1934 insertions[j]++;
1935 }
1936
1937 /* Similarly for deletions, but those occur in blocks rather than on
1938 edges. */
1939 for (i = 0; i < (unsigned) last_basic_block; i++)
1940 {
1941 EXECUTE_IF_SET_IN_SBITMAP (pre_delete_map[i], 0, j, sbi)
1942 deletions[j]++;
1943 }
1944
1945 /* Now that we have accurate counts, iterate over the elements in the
1946 hash table and see if any need too many insertions relative to the
1947 number of evaluations that can be removed. If so, mark them in
1948 PRUNE_EXPRS. */
1949 for (j = 0; j < (unsigned) n_elems; j++)
1950 if (deletions[j]
1951 && ((unsigned) insertions[j] / deletions[j]) > MAX_GCSE_INSERTION_RATIO)
1952 SET_BIT (prune_exprs, j);
1953
1954 /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS. */
1955 EXECUTE_IF_SET_IN_SBITMAP (prune_exprs, 0, j, sbi)
1956 {
1957 for (i = 0; i < (unsigned) n_edges; i++)
1958 RESET_BIT (pre_insert_map[i], j);
1959
1960 for (i = 0; i < (unsigned) last_basic_block; i++)
1961 RESET_BIT (pre_delete_map[i], j);
1962 }
1963
1964 sbitmap_free (prune_exprs);
1965 free (insertions);
1966 free (deletions);
1967 }
1968
1969 /* Top level routine to do the dataflow analysis needed by PRE. */
1970
1971 static struct edge_list *
1972 compute_pre_data (void)
1973 {
1974 struct edge_list *edge_list;
1975 basic_block bb;
1976
1977 compute_local_properties (transp, comp, antloc, &expr_hash_table);
1978 prune_expressions (true);
1979 sbitmap_vector_zero (ae_kill, last_basic_block);
1980
1981 /* Compute ae_kill for each basic block using:
1982
1983 ~(TRANSP | COMP)
1984 */
1985
1986 FOR_EACH_BB (bb)
1987 {
1988 sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
1989 sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
1990 }
1991
1992 edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
1993 ae_kill, &pre_insert_map, &pre_delete_map);
1994 sbitmap_vector_free (antloc);
1995 antloc = NULL;
1996 sbitmap_vector_free (ae_kill);
1997 ae_kill = NULL;
1998
1999 prune_insertions_deletions (expr_hash_table.n_elems);
2000
2001 return edge_list;
2002 }
2003 \f
2004 /* PRE utilities */
2005
2006 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
2007 block BB.
2008
2009 VISITED is a pointer to a working buffer for tracking which BB's have
2010 been visited. It is NULL for the top-level call.
2011
2012 We treat reaching expressions that go through blocks containing the same
2013 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
2014 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
2015 2 as not reaching. The intent is to improve the probability of finding
2016 only one reaching expression and to reduce register lifetimes by picking
2017 the closest such expression. */
2018
2019 static int
2020 pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr,
2021 basic_block bb, char *visited)
2022 {
2023 edge pred;
2024 edge_iterator ei;
2025
2026 FOR_EACH_EDGE (pred, ei, bb->preds)
2027 {
2028 basic_block pred_bb = pred->src;
2029
2030 if (pred->src == ENTRY_BLOCK_PTR
2031 /* Has this predecessor already been visited? */
2032 || visited[pred_bb->index])
2033 ;/* Nothing to do. */
2034
2035 /* Does this predecessor generate this expression? */
2036 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
2037 {
2038 /* Is this the occurrence we're looking for?
2039 Note that there's only one generating occurrence per block
2040 so we just need to check the block number. */
2041 if (occr_bb == pred_bb)
2042 return 1;
2043
2044 visited[pred_bb->index] = 1;
2045 }
2046 /* Ignore this predecessor if it kills the expression. */
2047 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
2048 visited[pred_bb->index] = 1;
2049
2050 /* Neither gen nor kill. */
2051 else
2052 {
2053 visited[pred_bb->index] = 1;
2054 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
2055 return 1;
2056 }
2057 }
2058
2059 /* All paths have been checked. */
2060 return 0;
2061 }
2062
2063 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
2064 memory allocated for that function is returned. */
2065
2066 static int
2067 pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
2068 {
2069 int rval;
2070 char *visited = XCNEWVEC (char, last_basic_block);
2071
2072 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
2073
2074 free (visited);
2075 return rval;
2076 }
2077 \f
2078 /* Generate RTL to copy an EXPR to its `reaching_reg' and return it. */
2079
2080 static rtx
2081 process_insert_insn (struct expr *expr)
2082 {
2083 rtx reg = expr->reaching_reg;
2084 /* Copy the expression to make sure we don't have any sharing issues. */
2085 rtx exp = copy_rtx (expr->expr);
2086 rtx pat;
2087
2088 start_sequence ();
2089
2090 /* If the expression is something that's an operand, like a constant,
2091 just copy it to a register. */
2092 if (general_operand (exp, GET_MODE (reg)))
2093 emit_move_insn (reg, exp);
2094
2095 /* Otherwise, make a new insn to compute this expression and make sure the
2096 insn will be recognized (this also adds any needed CLOBBERs). */
2097 else
2098 {
2099 rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
2100
2101 if (insn_invalid_p (insn, false))
2102 gcc_unreachable ();
2103 }
2104
2105 pat = get_insns ();
2106 end_sequence ();
2107
2108 return pat;
2109 }
2110
2111 /* Add EXPR to the end of basic block BB.
2112
2113 This is used by both PRE and code hoisting. */
2114
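/* In outline: if BB ends in a jump (or an insn that may throw), the
   new insns go just before it, minding cc0 setters and jump tables;
   if BB ends in a call with an abnormal or EH successor, they go
   before the first parameter load; otherwise they are simply
   appended to the block.  */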
2115 static void
2116 insert_insn_end_basic_block (struct expr *expr, basic_block bb)
2117 {
2118 rtx insn = BB_END (bb);
2119 rtx new_insn;
2120 rtx reg = expr->reaching_reg;
2121 int regno = REGNO (reg);
2122 rtx pat, pat_end;
2123
2124 pat = process_insert_insn (expr);
2125 gcc_assert (pat && INSN_P (pat));
2126
2127 pat_end = pat;
2128 while (NEXT_INSN (pat_end) != NULL_RTX)
2129 pat_end = NEXT_INSN (pat_end);
2130
2131 /* If the last insn is a jump, insert EXPR in front [taking care to
2132 handle cc0, etc. properly]. Similarly, we need to take care with
2133 trapping instructions in the presence of non-call exceptions. */
2134
2135 if (JUMP_P (insn)
2136 || (NONJUMP_INSN_P (insn)
2137 && (!single_succ_p (bb)
2138 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
2139 {
2140 #ifdef HAVE_cc0
2141 rtx note;
2142 #endif
2143
2144 /* If this is a jump table, then we can't insert stuff here. Since
2145 we know the previous real insn must be the tablejump, we insert
2146 the new instruction just before the tablejump. */
2147 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
2148 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
2149 insn = prev_active_insn (insn);
2150
2151 #ifdef HAVE_cc0
2152 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
2153 if cc0 isn't set. */
2154 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
2155 if (note)
2156 insn = XEXP (note, 0);
2157 else
2158 {
2159 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
2160 if (maybe_cc0_setter
2161 && INSN_P (maybe_cc0_setter)
2162 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
2163 insn = maybe_cc0_setter;
2164 }
2165 #endif
2166 /* FIXME: What if something in cc0/jump uses value set in new insn? */
2167 new_insn = emit_insn_before_noloc (pat, insn, bb);
2168 }
2169
2170 /* Likewise if the last insn is a call, as will happen in the presence
2171 of exception handling. */
2172 else if (CALL_P (insn)
2173 && (!single_succ_p (bb)
2174 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
2175 {
2176 /* Keeping in mind targets with small register classes and parameters
2177 in registers, we search backward and place the instructions before
2178 the first parameter is loaded. Do this for all targets for consistency,
2179 and on the presumption that we'll get better code elsewhere as well. */
2180
2181 /* Since different machines initialize their parameter registers
2182 in different orders, assume nothing. Collect the set of all
2183 parameter registers. */
2184 insn = find_first_parameter_load (insn, BB_HEAD (bb));
2185
2186 /* If we found all the parameter loads, then we want to insert
2187 before the first parameter load.
2188
2189 If we did not find all the parameter loads, then we might have
2190 stopped on the head of the block, which could be a CODE_LABEL.
2191 If we inserted before the CODE_LABEL, then we would be putting
2192 the insn in the wrong basic block. In that case, put the insn
2193 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
2194 while (LABEL_P (insn)
2195 || NOTE_INSN_BASIC_BLOCK_P (insn))
2196 insn = NEXT_INSN (insn);
2197
2198 new_insn = emit_insn_before_noloc (pat, insn, bb);
2199 }
2200 else
2201 new_insn = emit_insn_after_noloc (pat, insn, bb);
2202
2203 while (1)
2204 {
2205 if (INSN_P (pat))
2206 add_label_notes (PATTERN (pat), new_insn);
2207 if (pat == pat_end)
2208 break;
2209 pat = NEXT_INSN (pat);
2210 }
2211
2212 gcse_create_count++;
2213
2214 if (dump_file)
2215 {
2216 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
2217 bb->index, INSN_UID (new_insn));
2218 fprintf (dump_file, "copying expression %d to reg %d\n",
2219 expr->bitmap_index, regno);
2220 }
2221 }
2222
2223 /* Insert partially redundant expressions on edges in the CFG to make
2224 the expressions fully redundant. */
2225
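/* Note on the bit walking below: the insert map is scanned one
   SBITMAP_ELT_TYPE word at a time; INDX tracks the expression index
   of the current word's low bit, and the word is shifted right as
   each bit is tested.  */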
2226 static int
2227 pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
2228 {
2229 int e, i, j, num_edges, set_size, did_insert = 0;
2230 sbitmap *inserted;
2231
2232 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
2233 if it reaches any of the deleted expressions. */
2234
2235 set_size = pre_insert_map[0]->size;
2236 num_edges = NUM_EDGES (edge_list);
2237 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
2238 sbitmap_vector_zero (inserted, num_edges);
2239
2240 for (e = 0; e < num_edges; e++)
2241 {
2242 int indx;
2243 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
2244
2245 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
2246 {
2247 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
2248
2249 for (j = indx;
2250 insert && j < (int) expr_hash_table.n_elems;
2251 j++, insert >>= 1)
2252 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
2253 {
2254 struct expr *expr = index_map[j];
2255 struct occr *occr;
2256
2257 /* Now look at each deleted occurrence of this expression. */
2258 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2259 {
2260 if (! occr->deleted_p)
2261 continue;
2262
2263 /* Insert this expression on this edge if it would
2264 reach the deleted occurrence in BB. */
2265 if (!TEST_BIT (inserted[e], j))
2266 {
2267 rtx insn;
2268 edge eg = INDEX_EDGE (edge_list, e);
2269
2270 /* We can't insert anything on an abnormal and
2271 critical edge, so we insert the insn at the end of
2272 the previous block. There are several alternatives
2273 detailed in Morgan's book, p. 277 (sec. 10.5), for
2274 handling this situation. This one is easiest for
2275 now. */
2276
2277 if (eg->flags & EDGE_ABNORMAL)
2278 insert_insn_end_basic_block (index_map[j], bb);
2279 else
2280 {
2281 insn = process_insert_insn (index_map[j]);
2282 insert_insn_on_edge (insn, eg);
2283 }
2284
2285 if (dump_file)
2286 {
2287 fprintf (dump_file, "PRE: edge (%d,%d), ",
2288 bb->index,
2289 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
2290 fprintf (dump_file, "copy expression %d\n",
2291 expr->bitmap_index);
2292 }
2293
2294 update_ld_motion_stores (expr);
2295 SET_BIT (inserted[e], j);
2296 did_insert = 1;
2297 gcse_create_count++;
2298 }
2299 }
2300 }
2301 }
2302 }
2303
2304 sbitmap_vector_free (inserted);
2305 return did_insert;
2306 }
2307
2308 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
2309 Given "old_reg <- expr" (INSN), instead of adding after it
2310 reaching_reg <- old_reg
2311 it's better to do the following:
2312 reaching_reg <- expr
2313 old_reg <- reaching_reg
2314 because this way copy propagation can discover additional PRE
2315 opportunities. But if this fails, we try the old way.
2316 When "expr" is a store, i.e.
2317 given "MEM <- old_reg", instead of adding after it
2318 reaching_reg <- old_reg
2319 it's better to add it before as follows:
2320 reaching_reg <- old_reg
2321 MEM <- reaching_reg. */
2322
2323 static void
2324 pre_insert_copy_insn (struct expr *expr, rtx insn)
2325 {
2326 rtx reg = expr->reaching_reg;
2327 int regno = REGNO (reg);
2328 int indx = expr->bitmap_index;
2329 rtx pat = PATTERN (insn);
2330 rtx set, first_set, new_insn;
2331 rtx old_reg;
2332 int i;
2333
2334 /* This block matches the logic in hash_scan_insn. */
2335 switch (GET_CODE (pat))
2336 {
2337 case SET:
2338 set = pat;
2339 break;
2340
2341 case PARALLEL:
2342 /* Search through the parallel looking for the set whose
2343 source was the expression that we're interested in. */
2344 first_set = NULL_RTX;
2345 set = NULL_RTX;
2346 for (i = 0; i < XVECLEN (pat, 0); i++)
2347 {
2348 rtx x = XVECEXP (pat, 0, i);
2349 if (GET_CODE (x) == SET)
2350 {
2351 /* If the source was a REG_EQUAL or REG_EQUIV note, we
2352 may not find an equivalent expression, but in this
2353 case the PARALLEL will have a single set. */
2354 if (first_set == NULL_RTX)
2355 first_set = x;
2356 if (expr_equiv_p (SET_SRC (x), expr->expr))
2357 {
2358 set = x;
2359 break;
2360 }
2361 }
2362 }
2363
2364 gcc_assert (first_set);
2365 if (set == NULL_RTX)
2366 set = first_set;
2367 break;
2368
2369 default:
2370 gcc_unreachable ();
2371 }
2372
2373 if (REG_P (SET_DEST (set)))
2374 {
2375 old_reg = SET_DEST (set);
2376 /* Check if we can modify the set destination in the original insn. */
2377 if (validate_change (insn, &SET_DEST (set), reg, 0))
2378 {
2379 new_insn = gen_move_insn (old_reg, reg);
2380 new_insn = emit_insn_after (new_insn, insn);
2381 }
2382 else
2383 {
2384 new_insn = gen_move_insn (reg, old_reg);
2385 new_insn = emit_insn_after (new_insn, insn);
2386 }
2387 }
2388 else /* This is possible only in case of a store to memory. */
2389 {
2390 old_reg = SET_SRC (set);
2391 new_insn = gen_move_insn (reg, old_reg);
2392
2393 /* Check if we can modify the set source in the original insn. */
2394 if (validate_change (insn, &SET_SRC (set), reg, 0))
2395 new_insn = emit_insn_before (new_insn, insn);
2396 else
2397 new_insn = emit_insn_after (new_insn, insn);
2398 }
2399
2400 gcse_create_count++;
2401
2402 if (dump_file)
2403 fprintf (dump_file,
2404 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
2405 BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
2406 INSN_UID (insn), regno);
2407 }
2408
2409 /* Copy available expressions that reach the redundant expression
2410 to `reaching_reg'. */
2411
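/* Roughly: for each deleted (antic) occurrence we scan every
   available occurrence of the same expression and emit a copy where
   the available one reaches the deleted one; as the ??? note below
   says, this pairing is brute force.  */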
2412 static void
2413 pre_insert_copies (void)
2414 {
2415 unsigned int i, added_copy;
2416 struct expr *expr;
2417 struct occr *occr;
2418 struct occr *avail;
2419
2420 /* For each available expression in the table, copy the result to
2421 `reaching_reg' if the expression reaches a deleted one.
2422
2423 ??? The current algorithm is rather brute force.
2424 Need to do some profiling. */
2425
2426 for (i = 0; i < expr_hash_table.size; i++)
2427 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2428 {
2429 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
2430 we don't want to insert a copy here because the expression may not
2431 really be redundant. So only insert an insn if the expression was
2432 deleted. This test also avoids further processing if the
2433 expression wasn't deleted anywhere. */
2434 if (expr->reaching_reg == NULL)
2435 continue;
2436
2437 /* Set when we add a copy for that expression. */
2438 added_copy = 0;
2439
2440 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2441 {
2442 if (! occr->deleted_p)
2443 continue;
2444
2445 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
2446 {
2447 rtx insn = avail->insn;
2448
2449 /* No need to handle this one if handled already. */
2450 if (avail->copied_p)
2451 continue;
2452
2453 /* Don't handle this one if it's a redundant one. */
2454 if (INSN_DELETED_P (insn))
2455 continue;
2456
2457 /* Or if the expression doesn't reach the deleted one. */
2458 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
2459 expr,
2460 BLOCK_FOR_INSN (occr->insn)))
2461 continue;
2462
2463 added_copy = 1;
2464
2465 /* Copy the result of avail to reaching_reg. */
2466 pre_insert_copy_insn (expr, insn);
2467 avail->copied_p = 1;
2468 }
2469 }
2470
2471 if (added_copy)
2472 update_ld_motion_stores (expr);
2473 }
2474 }
2475
2476 /* Emit move from SRC to DEST noting the equivalence with expression computed
2477 in INSN. */
2478
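/* Schematically (hypothetical pseudos): if INSN is
   (set (reg 5) (plus (reg 2) (reg 3))) and the emitted copy is
   (set (reg 5) (reg 70)), the copy gets a REG_EQUAL note of
   (plus (reg 2) (reg 3)) so later CSE still sees the value.  */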
2479 static rtx
2480 gcse_emit_move_after (rtx dest, rtx src, rtx insn)
2481 {
2482 rtx new_rtx;
2483 rtx set = single_set (insn), set2;
2484 rtx note;
2485 rtx eqv;
2486
2487 /* This should never fail since we're creating a reg->reg copy
2488 we've verified to be valid. */
2489
2490 new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
2491
2492 /* Note the equivalence for local CSE pass. */
2493 set2 = single_set (new_rtx);
2494 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
2495 return new_rtx;
2496 if ((note = find_reg_equal_equiv_note (insn)))
2497 eqv = XEXP (note, 0);
2498 else
2499 eqv = SET_SRC (set);
2500
2501 set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
2502
2503 return new_rtx;
2504 }
2505
2506 /* Delete redundant computations.
2507 Deletion is done by changing the insn to copy the `reaching_reg' of
2508 the expression into the result of the SET. It is left to later passes
2509 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
2510
2511 Return nonzero if a change is made. */
2512
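/* In effect, a redundant (set (reg N) (expr)) is rewritten as
   (set (reg N) (reg REACHING)): the original insn is deleted and
   the copy is emitted via gcse_emit_move_after.  */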
2513 static int
2514 pre_delete (void)
2515 {
2516 unsigned int i;
2517 int changed;
2518 struct expr *expr;
2519 struct occr *occr;
2520
2521 changed = 0;
2522 for (i = 0; i < expr_hash_table.size; i++)
2523 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2524 {
2525 int indx = expr->bitmap_index;
2526
2527 /* We only need to search antic_occr since we require ANTLOC != 0. */
2528 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2529 {
2530 rtx insn = occr->insn;
2531 rtx set;
2532 basic_block bb = BLOCK_FOR_INSN (insn);
2533
2534 /* We only delete insns that have a single_set. */
2535 if (TEST_BIT (pre_delete_map[bb->index], indx)
2536 && (set = single_set (insn)) != 0
2537 && dbg_cnt (pre_insn))
2538 {
2539 /* Create a pseudo-reg to store the result of reaching
2540 expressions into. Get the mode for the new pseudo from
2541 the mode of the original destination pseudo. */
2542 if (expr->reaching_reg == NULL)
2543 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
2544
2545 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg, insn);
2546 delete_insn (insn);
2547 occr->deleted_p = 1;
2548 changed = 1;
2549 gcse_subst_count++;
2550
2551 if (dump_file)
2552 {
2553 fprintf (dump_file,
2554 "PRE: redundant insn %d (expression %d) in ",
2555 INSN_UID (insn), indx);
2556 fprintf (dump_file, "bb %d, reaching reg is %d\n",
2557 bb->index, REGNO (expr->reaching_reg));
2558 }
2559 }
2560 }
2561 }
2562
2563 return changed;
2564 }
2565
2566 /* Perform GCSE optimizations using PRE.
2567 This is called by one_pre_gcse_pass after all the dataflow analysis
2568 has been done.
2569
2570 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
2571 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
2572 Compiler Design and Implementation.
2573
2574 ??? A new pseudo reg is created to hold the reaching expression. The nice
2575 thing about the classical approach is that it would try to use an existing
2576 reg. If the register can't be adequately optimized [i.e. we introduce
2577 reload problems], one could add a pass here to propagate the new register
2578 through the block.
2579
2580 ??? We don't handle single sets in PARALLELs because we're [currently] not
2581 able to copy the rest of the parallel when we insert copies to create full
2582 redundancies from partial redundancies. However, there's no reason why we
2583 can't handle PARALLELs in the cases where there are no partial
2584 redundancies. */
2585
2586 static int
2587 pre_gcse (struct edge_list *edge_list)
2588 {
2589 unsigned int i;
2590 int did_insert, changed;
2591 struct expr **index_map;
2592 struct expr *expr;
2593
2594 /* Compute a mapping from expression number (`bitmap_index') to
2595 hash table entry. */
2596
2597 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
2598 for (i = 0; i < expr_hash_table.size; i++)
2599 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2600 index_map[expr->bitmap_index] = expr;
2601
2602 /* Delete the redundant insns first so that
2603 - we know what register to use for the new insns and for the other
2604 ones with reaching expressions
2605 - we know which insns are redundant when we go to create copies */
2606
2607 changed = pre_delete ();
2608 did_insert = pre_edge_insert (edge_list, index_map);
2609
2610 /* In other places with reaching expressions, copy the expression to the
2611 specially allocated pseudo-reg that reaches the redundant expr. */
2612 pre_insert_copies ();
2613 if (did_insert)
2614 {
2615 commit_edge_insertions ();
2616 changed = 1;
2617 }
2618
2619 free (index_map);
2620 return changed;
2621 }
2622
2623 /* Top level routine to perform one PRE GCSE pass.
2624
2625 Return nonzero if a change was made. */
2626
2627 static int
2628 one_pre_gcse_pass (void)
2629 {
2630 int changed = 0;
2631
2632 gcse_subst_count = 0;
2633 gcse_create_count = 0;
2634
2635 /* Return if there's nothing to do, or it is too expensive. */
2636 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
2637 || is_too_expensive (_("PRE disabled")))
2638 return 0;
2639
2640 /* We need alias. */
2641 init_alias_analysis ();
2642
2643 bytes_used = 0;
2644 gcc_obstack_init (&gcse_obstack);
2645 alloc_gcse_mem ();
2646
2647 alloc_hash_table (&expr_hash_table);
2648 add_noreturn_fake_exit_edges ();
2649 if (flag_gcse_lm)
2650 compute_ld_motion_mems ();
2651
2652 compute_hash_table (&expr_hash_table);
2653 if (flag_gcse_lm)
2654 trim_ld_motion_mems ();
2655 if (dump_file)
2656 dump_hash_table (dump_file, "Expression", &expr_hash_table);
2657
2658 if (expr_hash_table.n_elems > 0)
2659 {
2660 struct edge_list *edge_list;
2661 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
2662 edge_list = compute_pre_data ();
2663 changed |= pre_gcse (edge_list);
2664 free_edge_list (edge_list);
2665 free_pre_mem ();
2666 }
2667
2668 if (flag_gcse_lm)
2669 free_ld_motion_mems ();
2670 remove_fake_exit_edges ();
2671 free_hash_table (&expr_hash_table);
2672
2673 free_gcse_mem ();
2674 obstack_free (&gcse_obstack, NULL);
2675
2676 /* We are finished with alias. */
2677 end_alias_analysis ();
2678
2679 if (dump_file)
2680 {
2681 fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
2682 current_function_name (), n_basic_blocks, bytes_used);
2683 fprintf (dump_file, "%d substs, %d insns created\n",
2684 gcse_subst_count, gcse_create_count);
2685 }
2686
2687 return changed;
2688 }
2689 \f
2690 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
2691 to INSN. If such notes are added to an insn which references a
2692 CODE_LABEL, the LABEL_NUSES count is incremented. We have to add
2693 those notes, because the following loop optimization pass requires
2694 them. */
2695
2696 /* ??? If there was a jump optimization pass after gcse and before loop,
2697 then we would not need to do this here, because jump would add the
2698 necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes. */
2699
2700 static void
2701 add_label_notes (rtx x, rtx insn)
2702 {
2703 enum rtx_code code = GET_CODE (x);
2704 int i, j;
2705 const char *fmt;
2706
2707 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2708 {
2709 /* This code used to ignore labels that referred to dispatch tables to
2710 avoid flow generating (slightly) worse code.
2711
2712 We no longer ignore such label references (see LABEL_REF handling in
2713 mark_jump_label for additional information). */
2714
2715 /* There's no reason for current users to emit jump-insns with
2716 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
2717 notes. */
2718 gcc_assert (!JUMP_P (insn));
2719 add_reg_note (insn, REG_LABEL_OPERAND, XEXP (x, 0));
2720
2721 if (LABEL_P (XEXP (x, 0)))
2722 LABEL_NUSES (XEXP (x, 0))++;
2723
2724 return;
2725 }
2726
2727 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2728 {
2729 if (fmt[i] == 'e')
2730 add_label_notes (XEXP (x, i), insn);
2731 else if (fmt[i] == 'E')
2732 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2733 add_label_notes (XVECEXP (x, i, j), insn);
2734 }
2735 }
2736
2737 /* Code Hoisting variables and subroutines. */
2738
2739 /* Very busy expressions. */
2740 static sbitmap *hoist_vbein;
2741 static sbitmap *hoist_vbeout;
2742
2743 /* ??? We could compute post dominators and run this algorithm in
2744 reverse to perform tail merging; doing so would probably be
2745 more effective than the tail merging code in jump.c.
2746
2747 It's unclear if tail merging could be run in parallel with
2748 code hoisting. It would be nice. */
2749
2750 /* Allocate vars used for code hoisting analysis. */
2751
2752 static void
2753 alloc_code_hoist_mem (int n_blocks, int n_exprs)
2754 {
2755 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
2756 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
2757 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
2758
2759 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
2760 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
2761 }
2762
2763 /* Free vars used for code hoisting analysis. */
2764
2765 static void
2766 free_code_hoist_mem (void)
2767 {
2768 sbitmap_vector_free (antloc);
2769 sbitmap_vector_free (transp);
2770 sbitmap_vector_free (comp);
2771
2772 sbitmap_vector_free (hoist_vbein);
2773 sbitmap_vector_free (hoist_vbeout);
2774
2775 free_dominance_info (CDI_DOMINATORS);
2776 }
2777
2778 /* Compute the very busy expressions at entry/exit from each block.
2779
2780 An expression is very busy if all paths from a given point
2781 compute the expression. */
2782
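/* Informally, the fixpoint computed below is:
     VBEout(bb) = (intersection over succs s of VBEin(s)) | COMP(bb)
     VBEin(bb)  = ANTLOC(bb) | (VBEout(bb) & TRANSP(bb))
   where the block laid out just before the exit keeps VBEout
   zero.  */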
2783 static void
2784 compute_code_hoist_vbeinout (void)
2785 {
2786 int changed, passes;
2787 basic_block bb;
2788
2789 sbitmap_vector_zero (hoist_vbeout, last_basic_block);
2790 sbitmap_vector_zero (hoist_vbein, last_basic_block);
2791
2792 passes = 0;
2793 changed = 1;
2794
2795 while (changed)
2796 {
2797 changed = 0;
2798
2799 /* We scan the blocks in the reverse order to speed up
2800 the convergence. */
2801 FOR_EACH_BB_REVERSE (bb)
2802 {
2803 if (bb->next_bb != EXIT_BLOCK_PTR)
2804 {
2805 sbitmap_intersection_of_succs (hoist_vbeout[bb->index],
2806 hoist_vbein, bb);
2807
2808 /* Include expressions in VBEout that are calculated
2809 in BB and available at its end. */
2810 sbitmap_a_or_b (hoist_vbeout[bb->index],
2811 hoist_vbeout[bb->index], comp[bb->index]);
2812 }
2813
2814 changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index],
2815 antloc[bb->index],
2816 hoist_vbeout[bb->index],
2817 transp[bb->index]);
2818 }
2819
2820 passes++;
2821 }
2822
2823 if (dump_file)
2824 {
2825 fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
2826
2827 FOR_EACH_BB (bb)
2828 {
2829 fprintf (dump_file, "vbein (%d): ", bb->index);
2830 dump_sbitmap_file (dump_file, hoist_vbein[bb->index]);
2831 fprintf (dump_file, "vbeout(%d): ", bb->index);
2832 dump_sbitmap_file (dump_file, hoist_vbeout[bb->index]);
2833 }
2834 }
2835 }
2836
2837 /* Top level routine to do the dataflow analysis needed by code hoisting. */
2838
2839 static void
2840 compute_code_hoist_data (void)
2841 {
2842 compute_local_properties (transp, comp, antloc, &expr_hash_table);
2843 prune_expressions (false);
2844 compute_code_hoist_vbeinout ();
2845 calculate_dominance_info (CDI_DOMINATORS);
2846 if (dump_file)
2847 fprintf (dump_file, "\n");
2848 }
2849
2850 /* Determine if the expression EXPR should be hoisted to EXPR_BB up in
2851 flow graph, if it can reach BB unimpaired. Stop the search if the
2852 expression would need to be moved more than DISTANCE instructions.
2853
2854 DISTANCE is the number of instructions through which EXPR can be
2855 hoisted up in flow graph.
2856
2857 BB_SIZE points to an array which contains the number of instructions
2858 for each basic block.
2859
2860 PRESSURE_CLASS and NREGS are register class and number of hard registers
2861 for storing EXPR.
2862
2863 HOISTED_BBS points to a bitmap indicating basic blocks through which
2864 EXPR is hoisted.
2865
2866 It's unclear exactly what Muchnick meant by "unimpaired". It seems
2867 to me that the expression must either be computed or transparent in
2868 *every* block in the path(s) from EXPR_BB to BB. Any other definition
2869 would allow the expression to be hoisted out of loops, even if
2870 the expression wasn't a loop invariant.
2871
2872 Contrast this to reachability for PRE where an expression is
2873 considered reachable if *any* path reaches instead of *all*
2874 paths. */
2875
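/* A numeric sketch (hypothetical sizes, ignoring the register
   pressure exception below): with DISTANCE 12, hoisting through an
   8-insn block leaves 4; a further 5-insn block would exhaust the
   budget (4 - 5 <= 0) and the walk gives up.  */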
2876 static int
2877 should_hoist_expr_to_dom (basic_block expr_bb, struct expr *expr,
2878 basic_block bb, sbitmap visited, int distance,
2879 int *bb_size, enum reg_class pressure_class,
2880 int *nregs, bitmap hoisted_bbs)
2881 {
2882 unsigned int i;
2883 edge pred;
2884 edge_iterator ei;
2885 sbitmap_iterator sbi;
2886 int visited_allocated_locally = 0;
2887
2888 /* Terminate the search if the distance for which EXPR is allowed to
2889 move has been exhausted. */
2890 if (distance > 0)
2891 {
2892 /* Let EXPR be hoisted through a basic block at no cost if the block
2893 has low register pressure. An exception is made for constant
2894 expressions, because hoisting constants aggressively results in
2895 worse code. This exception is based on observations of CSiBE on
2896 the ARM target; it has no obvious effect on other targets like
2897 x86, x86_64, mips and powerpc. */
2898 if (!flag_ira_hoist_pressure
2899 || (BB_DATA (bb)->max_reg_pressure[pressure_class]
2900 >= ira_class_hard_regs_num[pressure_class]
2901 || CONST_INT_P (expr->expr)))
2902 distance -= bb_size[bb->index];
2903
2904 if (distance <= 0)
2905 return 0;
2906 }
2907 else
2908 gcc_assert (distance == 0);
2909
2910 if (visited == NULL)
2911 {
2912 visited_allocated_locally = 1;
2913 visited = sbitmap_alloc (last_basic_block);
2914 sbitmap_zero (visited);
2915 }
2916
2917 FOR_EACH_EDGE (pred, ei, bb->preds)
2918 {
2919 basic_block pred_bb = pred->src;
2920
2921 if (pred->src == ENTRY_BLOCK_PTR)
2922 break;
2923 else if (pred_bb == expr_bb)
2924 continue;
2925 else if (TEST_BIT (visited, pred_bb->index))
2926 continue;
2927 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
2928 break;
2929 /* Not killed. */
2930 else
2931 {
2932 SET_BIT (visited, pred_bb->index);
2933 if (! should_hoist_expr_to_dom (expr_bb, expr, pred_bb,
2934 visited, distance, bb_size,
2935 pressure_class, nregs, hoisted_bbs))
2936 break;
2937 }
2938 }
2939 if (visited_allocated_locally)
2940 {
2941 /* If EXPR can be hoisted to expr_bb, record basic blocks through
2942 which EXPR is hoisted in hoisted_bbs. Also update register
2943 pressure for basic blocks newly added in hoisted_bbs. */
2944 if (flag_ira_hoist_pressure && !pred)
2945 {
2946 EXECUTE_IF_SET_IN_SBITMAP (visited, 0, i, sbi)
2947 if (!bitmap_bit_p (hoisted_bbs, i))
2948 {
2949 bitmap_set_bit (hoisted_bbs, i);
2950 BB_DATA (BASIC_BLOCK (i))->max_reg_pressure[pressure_class]
2951 += *nregs;
2952 }
2953 }
2954 sbitmap_free (visited);
2955 }
2956
2957 return (pred == NULL);
2958 }
2959 \f
2960 /* Find occurrence in BB. */
2961
2962 static struct occr *
2963 find_occr_in_bb (struct occr *occr, basic_block bb)
2964 {
2965 /* Find the right occurrence of this expression. */
2966 while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
2967 occr = occr->next;
2968
2969 return occr;
2970 }
2971
2972 /* Actually perform code hoisting.
2973
2974 The code hoisting pass can hoist multiple computations of the same
2975 expression along dominated paths to a dominating basic block, like
2976 from b2/b3 to b1 as depicted below:
2977
2978 b1 ------
2979 /\ |
2980 / \ |
2981 bx by distance
2982 / \ |
2983 / \ |
2984 b2 b3 ------
2985
2986 Unfortunately code hoisting generally extends the live range of an
2987 output pseudo register, which increases register pressure and hurts
2988 register allocation. To address this issue, an attribute MAX_DISTANCE
2989 is computed and attached to each expression. The attribute is computed
2990 from the rtx cost of the corresponding expression and is used to control
2991 how far the expression can be hoisted up the flow graph. As the
2992 expression is hoisted up the flow graph, GCC decreases its DISTANCE
2993 and stops the hoist if DISTANCE reaches 0.
2994
2995 Option "-fira-hoist-pressure" implements register-pressure-directed
2996 hoisting based on the above method. The rationale is:
2997 1. Calculate register pressure for each basic block by reusing the
2998 IRA facility.
2999 2. When an expression is hoisted through one basic block, GCC checks
3000 the register pressure of the basic block and decreases DISTANCE
3001 only when the register pressure is high. In other words, an
3002 expression is hoisted through a basic block with low register
3003 pressure at no cost.
3004 3. Update register pressure information for basic blocks through
3005 which the expression is hoisted.
3006 TODO: It is possible for register pressure to decrease because of
3007 shrunk live ranges of input pseudo registers when hoisting an
3008 expression. For now, this effect is not simulated and we just
3009 increase register pressure for hoisted expressions. */
3010
3011 static int
3012 hoist_code (void)
3013 {
3014 basic_block bb, dominated;
3015 VEC (basic_block, heap) *dom_tree_walk;
3016 unsigned int dom_tree_walk_index;
3017 VEC (basic_block, heap) *domby;
3018 unsigned int i, j, k;
3019 struct expr **index_map;
3020 struct expr *expr;
3021 int *to_bb_head;
3022 int *bb_size;
3023 int changed = 0;
3024 struct bb_data *data;
3025 /* Basic blocks that have occurrences reachable from BB. */
3026 bitmap from_bbs;
3027 /* Basic blocks through which expr is hoisted. */
3028 bitmap hoisted_bbs = NULL;
3029 bitmap_iterator bi;
3030
3031 /* Compute a mapping from expression number (`bitmap_index') to
3032 hash table entry. */
3033
3034 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
3035 for (i = 0; i < expr_hash_table.size; i++)
3036 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
3037 index_map[expr->bitmap_index] = expr;
3038
3039 /* Calculate sizes of basic blocks and note how far
3040 each instruction is from the start of its block. We then use this
3041 data to restrict the distance an expression can travel. */
3042
3043 to_bb_head = XCNEWVEC (int, get_max_uid ());
3044 bb_size = XCNEWVEC (int, last_basic_block);
3045
3046 FOR_EACH_BB (bb)
3047 {
3048 rtx insn;
3049 int to_head;
3050
3051 to_head = 0;
3052 FOR_BB_INSNS (bb, insn)
3053 {
3054 /* Don't count debug instructions to avoid them affecting
3055 hoisting decisions. */
3056 if (NONDEBUG_INSN_P (insn))
3057 to_bb_head[INSN_UID (insn)] = to_head++;
3058 }
3059
3060 bb_size[bb->index] = to_head;
3061 }
3062
3063 gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR->succs) == 1
3064 && (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest
3065 == ENTRY_BLOCK_PTR->next_bb));
3066
3067 from_bbs = BITMAP_ALLOC (NULL);
3068 if (flag_ira_hoist_pressure)
3069 hoisted_bbs = BITMAP_ALLOC (NULL);
3070
3071 dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
3072 ENTRY_BLOCK_PTR->next_bb);
3073
3074 /* Walk over each basic block looking for potentially hoistable
3075 expressions; nothing gets hoisted from the entry block. */
3076 FOR_EACH_VEC_ELT (basic_block, dom_tree_walk, dom_tree_walk_index, bb)
3077 {
3078 domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);
3079
3080 if (VEC_length (basic_block, domby) == 0)
3081 continue;
3082
3083 /* Examine each expression that is very busy at the exit of this
3084 block. These are the potentially hoistable expressions. */
3085 for (i = 0; i < SBITMAP_SIZE (hoist_vbeout[bb->index]); i++)
3086 {
3087 if (TEST_BIT (hoist_vbeout[bb->index], i))
3088 {
3089 int nregs = 0;
3090 enum reg_class pressure_class = NO_REGS;
3091 /* Current expression. */
3092 struct expr *expr = index_map[i];
3093 /* Number of occurrences of EXPR that can be hoisted to BB. */
3094 int hoistable = 0;
3095 /* Occurrences reachable from BB. */
3096 VEC (occr_t, heap) *occrs_to_hoist = NULL;
3097 /* We want to insert the expression into BB only once, so
3098 note when we've inserted it. */
3099 int insn_inserted_p;
3100 occr_t occr;
3101
3102 /* If an expression is computed in BB and is available at end of
3103 BB, hoist all occurrences dominated by BB to BB. */
3104 if (TEST_BIT (comp[bb->index], i))
3105 {
3106 occr = find_occr_in_bb (expr->antic_occr, bb);
3107
3108 if (occr)
3109 {
3110 /* An occurrence might've been already deleted
3111 while processing a dominator of BB. */
3112 if (!occr->deleted_p)
3113 {
3114 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3115 hoistable++;
3116 }
3117 }
3118 else
3119 hoistable++;
3120 }
3121
3122 /* We've found a potentially hoistable expression, now
3123 we look at every block BB dominates to see if it
3124 computes the expression. */
3125 FOR_EACH_VEC_ELT (basic_block, domby, j, dominated)
3126 {
3127 int max_distance;
3128
3129 /* Ignore self dominance. */
3130 if (bb == dominated)
3131 continue;
3132 /* We've found a dominated block, now see if it computes
3133 the busy expression and whether or not moving that
3134 expression to the "beginning" of that block is safe. */
3135 if (!TEST_BIT (antloc[dominated->index], i))
3136 continue;
3137
3138 occr = find_occr_in_bb (expr->antic_occr, dominated);
3139 gcc_assert (occr);
3140
3141 /* An occurrence might've been already deleted
3142 while processing a dominator of BB. */
3143 if (occr->deleted_p)
3144 continue;
3145 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3146
3147 max_distance = expr->max_distance;
3148 if (max_distance > 0)
3149 /* Adjust MAX_DISTANCE to account for the fact that
3150 OCCR won't have to travel all of DOMINATED, but
3151 only part of it. */
3152 max_distance += (bb_size[dominated->index]
3153 - to_bb_head[INSN_UID (occr->insn)]);
3154
3155 pressure_class = get_pressure_class_and_nregs (occr->insn,
3156 &nregs);
3157
3158 /* Note if the expression should be hoisted from the dominated
3159 block to BB if it can reach DOMINATED unimpaired.
3160
3161 Keep track of how many times this expression is hoistable
3162 from a dominated block into BB. */
3163 if (should_hoist_expr_to_dom (bb, expr, dominated, NULL,
3164 max_distance, bb_size,
3165 pressure_class, &nregs,
3166 hoisted_bbs))
3167 {
3168 hoistable++;
3169 VEC_safe_push (occr_t, heap,
3170 occrs_to_hoist, occr);
3171 bitmap_set_bit (from_bbs, dominated->index);
3172 }
3173 }
3174
3175 /* If we found more than one hoistable occurrence of this
3176 expression, then note it in the vector of expressions to
3177 hoist. It makes no sense to hoist things which are computed
3178 in only one BB, and doing so tends to pessimize register
3179 allocation. One could increase this value to try harder
3180 to avoid any possible code expansion due to register
3181 allocation issues; however experiments have shown that
3182 the vast majority of hoistable expressions are only movable
3183 from two successors, so raising this threshold is likely
3184 to nullify any benefit we get from code hoisting. */
3185 if (hoistable > 1 && dbg_cnt (hoist_insn))
3186 {
3187 /* If (hoistable != VEC_length), then there is
3188 an occurrence of EXPR in BB itself. Don't waste
3189 time looking for LCA in this case. */
3190 if ((unsigned) hoistable
3191 == VEC_length (occr_t, occrs_to_hoist))
3192 {
3193 basic_block lca;
3194
3195 lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
3196 from_bbs);
3197 if (lca != bb)
3198 /* Punt, it's better to hoist these occurrences to
3199 LCA. */
3200 VEC_free (occr_t, heap, occrs_to_hoist);
3201 }
3202 }
3203 else
3204 /* Punt, there is no point in hoisting a single occurrence. */
3205 VEC_free (occr_t, heap, occrs_to_hoist);
3206
3207 if (flag_ira_hoist_pressure
3208 && !VEC_empty (occr_t, occrs_to_hoist))
3209 {
3210 /* Update register pressure for basic block to which expr
3211 is hoisted. */
3212 data = BB_DATA (bb);
3213 data->max_reg_pressure[pressure_class] += nregs;
3214 }
3215 else if (flag_ira_hoist_pressure)
3216 {
3217 /* Restore register pressure of basic block recorded in
3218 hoisted_bbs when expr will not be hoisted. */
3219 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3220 {
3221 data = BB_DATA (BASIC_BLOCK (k));
3222 data->max_reg_pressure[pressure_class] -= nregs;
3223 }
3224 }
3225
3226 if (flag_ira_hoist_pressure)
3227 bitmap_clear (hoisted_bbs);
3228
3229 insn_inserted_p = 0;
3230
3231 /* Walk through the occurrences of the I'th expression we want
3232 to hoist to BB and make the transformations. */
3233 FOR_EACH_VEC_ELT (occr_t, occrs_to_hoist, j, occr)
3234 {
3235 rtx insn;
3236 rtx set;
3237
3238 gcc_assert (!occr->deleted_p);
3239
3240 insn = occr->insn;
3241 set = single_set (insn);
3242 gcc_assert (set);
3243
3244 /* Create a pseudo-reg to store the result of reaching
3245 expressions into. Get the mode for the new pseudo
3246 from the mode of the original destination pseudo.
3247
3248 It is important to use new pseudos whenever we
3249 emit a set. This will allow reload to use
3250 rematerialization for such registers. */
3251 if (!insn_inserted_p)
3252 expr->reaching_reg
3253 = gen_reg_rtx_and_attrs (SET_DEST (set));
3254
3255 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg,
3256 insn);
3257 delete_insn (insn);
3258 occr->deleted_p = 1;
3259 changed = 1;
3260 gcse_subst_count++;
3261
3262 if (!insn_inserted_p)
3263 {
3264 insert_insn_end_basic_block (expr, bb);
3265 insn_inserted_p = 1;
3266 }
3267 }
3268
3269 VEC_free (occr_t, heap, occrs_to_hoist);
3270 bitmap_clear (from_bbs);
3271 }
3272 }
3273 VEC_free (basic_block, heap, domby);
3274 }
3275
3276 VEC_free (basic_block, heap, dom_tree_walk);
3277 BITMAP_FREE (from_bbs);
3278 if (flag_ira_hoist_pressure)
3279 BITMAP_FREE (hoisted_bbs);
3280
3281 free (bb_size);
3282 free (to_bb_head);
3283 free (index_map);
3284
3285 return changed;
3286 }
3287
3288 /* Return pressure class and number of needed hard registers (through
3289 *NREGS) of register REGNO. */
3290 static enum reg_class
3291 get_regno_pressure_class (int regno, int *nregs)
3292 {
3293 if (regno >= FIRST_PSEUDO_REGISTER)
3294 {
3295 enum reg_class pressure_class;
3296
3297 pressure_class = reg_allocno_class (regno);
3298 pressure_class = ira_pressure_class_translate[pressure_class];
3299 *nregs
3300 = ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
3301 return pressure_class;
3302 }
3303 else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
3304 && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
3305 {
3306 *nregs = 1;
3307 return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
3308 }
3309 else
3310 {
3311 *nregs = 0;
3312 return NO_REGS;
3313 }
3314 }
3315
3316 /* Return pressure class and number of hard registers (through *NREGS)
3317 for destination of INSN. */
3318 static enum reg_class
3319 get_pressure_class_and_nregs (rtx insn, int *nregs)
3320 {
3321 rtx reg;
3322 enum reg_class pressure_class;
3323 rtx set = single_set (insn);
3324
3325 /* Considered invariant insns have only one set. */
3326 gcc_assert (set != NULL_RTX);
3327 reg = SET_DEST (set);
3328 if (GET_CODE (reg) == SUBREG)
3329 reg = SUBREG_REG (reg);
3330 if (MEM_P (reg))
3331 {
3332 *nregs = 0;
3333 pressure_class = NO_REGS;
3334 }
3335 else
3336 {
3337 gcc_assert (REG_P (reg));
3338 pressure_class = reg_allocno_class (REGNO (reg));
3339 pressure_class = ira_pressure_class_translate[pressure_class];
3340 *nregs
3341 = ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
3342 }
3343 return pressure_class;
3344 }
3345
3346 /* Increase (if INCR_P) or decrease current register pressure for
3347 register REGNO. */
3348 static void
3349 change_pressure (int regno, bool incr_p)
3350 {
3351 int nregs;
3352 enum reg_class pressure_class;
3353
3354 pressure_class = get_regno_pressure_class (regno, &nregs);
3355 if (! incr_p)
3356 curr_reg_pressure[pressure_class] -= nregs;
3357 else
3358 {
3359 curr_reg_pressure[pressure_class] += nregs;
3360 if (BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3361 < curr_reg_pressure[pressure_class])
3362 BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3363 = curr_reg_pressure[pressure_class];
3364 }
3365 }
3366
3367 /* Calculate register pressure for each basic block by walking insns
3368 from last to first. */
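/* A sketch of the backward scan: starting from DF_LR_OUT (bb), a
   def that fully sets a register drops it from the live set
   (pressure goes down), a use adds it (pressure goes up), and the
   per-class maximum seen anywhere in the block is recorded in
   BB_DATA (bb)->max_reg_pressure.  */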
3369 static void
3370 calculate_bb_reg_pressure (void)
3371 {
3372 int i;
3373 unsigned int j;
3374 rtx insn;
3375 basic_block bb;
3376 bitmap curr_regs_live;
3377 bitmap_iterator bi;
3378
3379
3380 ira_setup_eliminable_regset (false);
3381 curr_regs_live = BITMAP_ALLOC (&reg_obstack);
3382 FOR_EACH_BB (bb)
3383 {
3384 curr_bb = bb;
3385 bitmap_copy (curr_regs_live, DF_LR_OUT (bb));
3386 for (i = 0; i < ira_pressure_classes_num; i++)
3387 curr_reg_pressure[ira_pressure_classes[i]] = 0;
3388 EXECUTE_IF_SET_IN_BITMAP (curr_regs_live, 0, j, bi)
3389 change_pressure (j, true);
3390
3391 FOR_BB_INSNS_REVERSE (bb, insn)
3392 {
3393 rtx dreg;
3394 int regno;
3395 df_ref *def_rec, *use_rec;
3396
3397 if (! NONDEBUG_INSN_P (insn))
3398 continue;
3399
3400 for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
3401 {
3402 dreg = DF_REF_REAL_REG (*def_rec);
3403 gcc_assert (REG_P (dreg));
3404 regno = REGNO (dreg);
3405 if (!(DF_REF_FLAGS (*def_rec)
3406 & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
3407 {
3408 if (bitmap_clear_bit (curr_regs_live, regno))
3409 change_pressure (regno, false);
3410 }
3411 }
3412
3413 for (use_rec = DF_INSN_USES (insn); *use_rec; use_rec++)
3414 {
3415 dreg = DF_REF_REAL_REG (*use_rec);
3416 gcc_assert (REG_P (dreg));
3417 regno = REGNO (dreg);
3418 if (bitmap_set_bit (curr_regs_live, regno))
3419 change_pressure (regno, true);
3420 }
3421 }
3422 }
3423 BITMAP_FREE (curr_regs_live);
3424
3425 if (dump_file == NULL)
3426 return;
3427
3428 fprintf (dump_file, "\nRegister Pressure: \n");
3429 FOR_EACH_BB (bb)
3430 {
3431 fprintf (dump_file, " Basic block %d: \n", bb->index);
3432 for (i = 0; (int) i < ira_pressure_classes_num; i++)
3433 {
3434 enum reg_class pressure_class;
3435
3436 pressure_class = ira_pressure_classes[i];
3437 if (BB_DATA (bb)->max_reg_pressure[pressure_class] == 0)
3438 continue;
3439
3440 fprintf (dump_file, " %s=%d\n", reg_class_names[pressure_class],
3441 BB_DATA (bb)->max_reg_pressure[pressure_class]);
3442 }
3443 }
3444 fprintf (dump_file, "\n");
3445 }
3446
3447 /* Top level routine to perform one code hoisting (aka unification) pass
3448
3449 Return nonzero if a change was made. */
3450
3451 static int
3452 one_code_hoisting_pass (void)
3453 {
3454 int changed = 0;
3455
3456 gcse_subst_count = 0;
3457 gcse_create_count = 0;
3458
3459 /* Return if there's nothing to do, or it is too expensive. */
3460 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
3461 || is_too_expensive (_("GCSE disabled")))
3462 return 0;
3463
3464 doing_code_hoisting_p = true;
3465
3466 /* Calculate register pressure for each basic block. */
3467 if (flag_ira_hoist_pressure)
3468 {
3469 regstat_init_n_sets_and_refs ();
3470 ira_set_pseudo_classes (false, dump_file);
3471 alloc_aux_for_blocks (sizeof (struct bb_data));
3472 calculate_bb_reg_pressure ();
3473 regstat_free_n_sets_and_refs ();
3474 }
3475
3476 /* We need alias. */
3477 init_alias_analysis ();
3478
3479 bytes_used = 0;
3480 gcc_obstack_init (&gcse_obstack);
3481 alloc_gcse_mem ();
3482
3483 alloc_hash_table (&expr_hash_table);
3484 compute_hash_table (&expr_hash_table);
3485 if (dump_file)
3486 dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);
3487
3488 if (expr_hash_table.n_elems > 0)
3489 {
3490 alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
3491 compute_code_hoist_data ();
3492 changed = hoist_code ();
3493 free_code_hoist_mem ();
3494 }
3495
3496 if (flag_ira_hoist_pressure)
3497 {
3498 free_aux_for_blocks ();
3499 free_reg_info ();
3500 }
3501 free_hash_table (&expr_hash_table);
3502 free_gcse_mem ();
3503 obstack_free (&gcse_obstack, NULL);
3504
3505 /* We are finished with alias. */
3506 end_alias_analysis ();
3507
3508 if (dump_file)
3509 {
3510 fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
3511 current_function_name (), n_basic_blocks, bytes_used);
3512 fprintf (dump_file, "%d substs, %d insns created\n",
3513 gcse_subst_count, gcse_create_count);
3514 }
3515
3516 doing_code_hoisting_p = false;
3517
3518 return changed;
3519 }
3520 \f
3521 /* Here we provide the things required to do store motion towards the exit.
3522 In order for this to be effective, gcse also needed to be taught how to
3523 move a load when it is killed only by a store to itself.
3524
3525 int i;
3526 float a[10];
3527
3528 void foo(float scale)
3529 {
3530 for (i=0; i<10; i++)
3531 a[i] *= scale;
3532 }
3533
3534 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
3535 the load out since it's live around the loop and stored at the bottom
3536 of the loop.
3537
3538 The 'Load Motion' referred to and implemented in this file is
3539 an enhancement to gcse which when using edge based LCM, recognizes
3540 this situation and allows gcse to move the load out of the loop.
3541
3542 Once gcse has hoisted the load, store motion can then push this
3543 load towards the exit, and we end up with no loads or stores of 'i'
3544 in the loop. */
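/* Purely for illustration, the combined effect on the example above is
   as if the source had been rewritten as follows (a sketch in source
   terms, not the actual RTL the passes produce; tmp_i stands for the
   reaching register chosen for 'i'):

     void foo (float scale)
     {
       int tmp_i;
       for (tmp_i = 0; tmp_i < 10; tmp_i++)
         a[tmp_i] *= scale;
       i = tmp_i;
     }

   The loads of 'i' have been removed entirely and the single remaining
   store has been pushed to the loop exit.  */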
3545
3546 static hashval_t
3547 pre_ldst_expr_hash (const void *p)
3548 {
3549 int do_not_record_p = 0;
3550 const struct ls_expr *const x = (const struct ls_expr *) p;
3551 return
3552 hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
3553 }
3554
3555 static int
3556 pre_ldst_expr_eq (const void *p1, const void *p2)
3557 {
3558 const struct ls_expr *const ptr1 = (const struct ls_expr *) p1,
3559 *const ptr2 = (const struct ls_expr *) p2;
3560 return expr_equiv_p (ptr1->pattern, ptr2->pattern);
3561 }
3562
3563 /* Search the ldst list for a matching expression. If no match is
3564 found, create and initialize a new entry. */
3565
3566 static struct ls_expr *
3567 ldst_entry (rtx x)
3568 {
3569 int do_not_record_p = 0;
3570 struct ls_expr * ptr;
3571 unsigned int hash;
3572 void **slot;
3573 struct ls_expr e;
3574
3575 hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
3576 NULL, /*have_reg_qty=*/false);
3577
3578 e.pattern = x;
3579 slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
3580 if (*slot)
3581 return (struct ls_expr *)*slot;
3582
3583 ptr = XNEW (struct ls_expr);
3584
3585 ptr->next = pre_ldst_mems;
3586 ptr->expr = NULL;
3587 ptr->pattern = x;
3588 ptr->pattern_regs = NULL_RTX;
3589 ptr->loads = NULL_RTX;
3590 ptr->stores = NULL_RTX;
3591 ptr->reaching_reg = NULL_RTX;
3592 ptr->invalid = 0;
3593 ptr->index = 0;
3594 ptr->hash_index = hash;
3595 pre_ldst_mems = ptr;
3596 *slot = ptr;
3597
3598 return ptr;
3599 }
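/* A usage sketch, mirroring how compute_ld_motion_mems below records a
   simple load (mem_x, dest and insn are placeholders for values the
   caller already has):

     struct ls_expr *entry = ldst_entry (mem_x);
     if (REG_P (dest))
       entry->loads = alloc_INSN_LIST (insn, entry->loads);
     else
       entry->invalid = 1;

   Repeated calls with equivalent patterns return the same entry, since
   the table is keyed by hash_rtx and compared with expr_equiv_p.  */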
3600
3601 /* Free up an individual ldst entry. */
3602
3603 static void
3604 free_ldst_entry (struct ls_expr * ptr)
3605 {
3606 free_INSN_LIST_list (& ptr->loads);
3607 free_INSN_LIST_list (& ptr->stores);
3608
3609 free (ptr);
3610 }
3611
3612 /* Free up all memory associated with the ldst list. */
3613
3614 static void
3615 free_ld_motion_mems (void)
3616 {
3617 if (pre_ldst_table)
3618 htab_delete (pre_ldst_table);
3619 pre_ldst_table = NULL;
3620
3621 while (pre_ldst_mems)
3622 {
3623 struct ls_expr * tmp = pre_ldst_mems;
3624
3625 pre_ldst_mems = pre_ldst_mems->next;
3626
3627 free_ldst_entry (tmp);
3628 }
3629
3630 pre_ldst_mems = NULL;
3631 }
3632
3633 /* Dump debugging info about the ldst list. */
3634
3635 static void
3636 print_ldst_list (FILE * file)
3637 {
3638 struct ls_expr * ptr;
3639
3640 fprintf (file, "LDST list: \n");
3641
3642 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
3643 {
3644 fprintf (file, " Pattern (%3d): ", ptr->index);
3645
3646 print_rtl (file, ptr->pattern);
3647
3648 fprintf (file, "\n Loads : ");
3649
3650 if (ptr->loads)
3651 print_rtl (file, ptr->loads);
3652 else
3653 fprintf (file, "(nil)");
3654
3655 fprintf (file, "\n Stores : ");
3656
3657 if (ptr->stores)
3658 print_rtl (file, ptr->stores);
3659 else
3660 fprintf (file, "(nil)");
3661
3662 fprintf (file, "\n\n");
3663 }
3664
3665 fprintf (file, "\n");
3666 }
3667
3668 /* Return the ls_expr entry for X if it is in the ldst list and valid, else NULL. */
3669
3670 static struct ls_expr *
3671 find_rtx_in_ldst (rtx x)
3672 {
3673 struct ls_expr e;
3674 void **slot;
3675 if (!pre_ldst_table)
3676 return NULL;
3677 e.pattern = x;
3678 slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
3679 if (!slot || ((struct ls_expr *)*slot)->invalid)
3680 return NULL;
3681 return (struct ls_expr *) *slot;
3682 }
3683 \f
3684 /* Load Motion for loads which only kill themselves. */
3685
3686 /* Return nonzero if X, a MEM, is a simple access with no side effects.
3687 These are the types of loads we consider for the ld_motion list;
3688 for all others we let the usual aliasing machinery take care of it. */
3689
3690 static int
3691 simple_mem (const_rtx x)
3692 {
3693 if (MEM_VOLATILE_P (x))
3694 return 0;
3695
3696 if (GET_MODE (x) == BLKmode)
3697 return 0;
3698
3699 /* If we are handling exceptions, we must be careful with memory references
3700 that may trap. If we are not, the behavior is undefined, so we may just
3701 continue. */
3702 if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
3703 return 0;
3704
3705 if (side_effects_p (x))
3706 return 0;
3707
3708 /* Do not consider function arguments passed on the stack. */
3709 if (reg_mentioned_p (stack_pointer_rtx, x))
3710 return 0;
3711
3712 if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
3713 return 0;
3714
3715 return 1;
3716 }
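/* Some illustrative examples of the checks above (modes and symbols are
   arbitrary):

     (mem:SI (symbol_ref ("x")))          accepted: a simple access
     (mem/v:SI (symbol_ref ("x")))        rejected: volatile
     (mem:BLK (symbol_ref ("x")))         rejected: BLKmode
     (mem:SI (plus:SI (reg:SI sp) (const_int 4)))
                                          rejected: mentions stack pointer  */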
3717
3718 /* Make sure there isn't a buried reference in this pattern anywhere.
3719 If there is, invalidate the entry for it since we're not capable
3720 of fixing it up just yet. We have to be sure we know about ALL
3721 loads, since the aliasing code will allow all entries in the
3722 ld_motion list to not alias themselves. If we miss a load, we will
3723 get the wrong value since gcse might common it and we won't know
3724 to fix it up. */
3725
3726 static void
3727 invalidate_any_buried_refs (rtx x)
3728 {
3729 const char * fmt;
3730 int i, j;
3731 struct ls_expr * ptr;
3732
3733 /* Invalidate it in the list. */
3734 if (MEM_P (x) && simple_mem (x))
3735 {
3736 ptr = ldst_entry (x);
3737 ptr->invalid = 1;
3738 }
3739
3740 /* Recursively process the insn. */
3741 fmt = GET_RTX_FORMAT (GET_CODE (x));
3742
3743 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3744 {
3745 if (fmt[i] == 'e')
3746 invalidate_any_buried_refs (XEXP (x, i));
3747 else if (fmt[i] == 'E')
3748 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3749 invalidate_any_buried_refs (XVECEXP (x, i, j));
3750 }
3751 }
3752
3753 /* Find all the 'simple' MEMs which are used in LOADs and STOREs, 'simple'
3754 being defined as MEM loads and stores to symbols, with no side effects
3755 and no registers in the expression. For a MEM destination, we also
3756 check that the insn is still valid if we replace the destination with a
3757 REG, as is done in update_ld_motion_stores. Any uses/defs which
3758 don't match these criteria are invalidated and trimmed out
3759 later. */
3760
3761 static void
3762 compute_ld_motion_mems (void)
3763 {
3764 struct ls_expr * ptr;
3765 basic_block bb;
3766 rtx insn;
3767
3768 pre_ldst_mems = NULL;
3769 pre_ldst_table
3770 = htab_create (13, pre_ldst_expr_hash, pre_ldst_expr_eq, NULL);
3771
3772 FOR_EACH_BB (bb)
3773 {
3774 FOR_BB_INSNS (bb, insn)
3775 {
3776 if (NONDEBUG_INSN_P (insn))
3777 {
3778 if (GET_CODE (PATTERN (insn)) == SET)
3779 {
3780 rtx src = SET_SRC (PATTERN (insn));
3781 rtx dest = SET_DEST (PATTERN (insn));
3782
3783 /* Check for a simple LOAD... */
3784 if (MEM_P (src) && simple_mem (src))
3785 {
3786 ptr = ldst_entry (src);
3787 if (REG_P (dest))
3788 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
3789 else
3790 ptr->invalid = 1;
3791 }
3792 else
3793 {
3794 /* Make sure there isn't a buried load somewhere. */
3795 invalidate_any_buried_refs (src);
3796 }
3797
3798 /* Check for stores. Don't worry about aliased ones; they
3799 will block any movement we might do later. We only care
3800 about this exact pattern since it is the only
3801 circumstance in which we ignore the aliasing info. */
3802 if (MEM_P (dest) && simple_mem (dest))
3803 {
3804 ptr = ldst_entry (dest);
3805
3806 if (! MEM_P (src)
3807 && GET_CODE (src) != ASM_OPERANDS
3808 /* Check for REG manually since want_to_gcse_p
3809 returns 0 for all REGs. */
3810 && can_assign_to_reg_without_clobbers_p (src))
3811 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
3812 else
3813 ptr->invalid = 1;
3814 }
3815 }
3816 else
3817 invalidate_any_buried_refs (PATTERN (insn));
3818 }
3819 }
3820 }
3821 }
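/* To make the matching above concrete (a sketch; modes, symbols and
   register numbers are hypothetical): a candidate load is a SET such as

     (set (reg:SI 100) (mem:SI (symbol_ref ("x"))))

   and a candidate store is

     (set (mem:SI (symbol_ref ("x"))) (reg:SI 100))

   A simple MEM buried anywhere else, e.g. inside

     (set (reg:SI 101) (plus:SI (mem:SI (symbol_ref ("x"))) (reg:SI 102)))

   only invalidates its ldst entry.  */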
3822
3823 /* Remove any references that have either been invalidated or are not in
3824 the expression list for PRE gcse. */
3825
3826 static void
3827 trim_ld_motion_mems (void)
3828 {
3829 struct ls_expr * * last = & pre_ldst_mems;
3830 struct ls_expr * ptr = pre_ldst_mems;
3831
3832 while (ptr != NULL)
3833 {
3834 struct expr * expr;
3835
3836 /* Delete if entry has been made invalid. */
3837 if (! ptr->invalid)
3838 {
3839 /* Delete if we cannot find this mem in the expression list. */
3840 unsigned int hash = ptr->hash_index % expr_hash_table.size;
3841
3842 for (expr = expr_hash_table.table[hash];
3843 expr != NULL;
3844 expr = expr->next_same_hash)
3845 if (expr_equiv_p (expr->expr, ptr->pattern))
3846 break;
3847 }
3848 else
3849 expr = (struct expr *) 0;
3850
3851 if (expr)
3852 {
3853 /* Set the expression field if we are keeping it. */
3854 ptr->expr = expr;
3855 last = & ptr->next;
3856 ptr = ptr->next;
3857 }
3858 else
3859 {
3860 *last = ptr->next;
3861 htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
3862 free_ldst_entry (ptr);
3863 ptr = * last;
3864 }
3865 }
3866
3867 /* Show the world what we've found. */
3868 if (dump_file && pre_ldst_mems != NULL)
3869 print_ldst_list (dump_file);
3870 }
3871
3872 /* This routine will take an expression which we are replacing with
3873 a reaching register, and update any stores that are needed if
3874 that expression is in the ld_motion list. Stores are updated by
3875 copying their SRC to the reaching register, and then storing
3876 the reaching register into the store location. This keeps the
3877 correct value in the reaching register for the loads. */
3878
3879 static void
3880 update_ld_motion_stores (struct expr * expr)
3881 {
3882 struct ls_expr * mem_ptr;
3883
3884 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
3885 {
3886 /* We could try to find just the REACHED stores, but it shouldn't
3887 matter if we set the reaching reg everywhere... some might be
3888 dead and should be eliminated later. */
3889
3890 /* We replace (set mem expr) with (set reg expr) (set mem reg)
3891 where reg is the reaching reg used in the load. We checked in
3892 compute_ld_motion_mems that we can replace (set mem expr) with
3893 (set reg expr) in that insn. */
3894 rtx list = mem_ptr->stores;
3895
3896 for ( ; list != NULL_RTX; list = XEXP (list, 1))
3897 {
3898 rtx insn = XEXP (list, 0);
3899 rtx pat = PATTERN (insn);
3900 rtx src = SET_SRC (pat);
3901 rtx reg = expr->reaching_reg;
3902 rtx copy;
3903
3904 /* If we've already copied it, continue. */
3905 if (expr->reaching_reg == src)
3906 continue;
3907
3908 if (dump_file)
3909 {
3910 fprintf (dump_file, "PRE: store updated with reaching reg ");
3911 print_rtl (dump_file, reg);
3912 fprintf (dump_file, ":\n ");
3913 print_inline_rtx (dump_file, insn, 8);
3914 fprintf (dump_file, "\n");
3915 }
3916
3917 copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
3918 emit_insn_before (copy, insn);
3919 SET_SRC (pat) = reg;
3920 df_insn_rescan (insn);
3921
3922 /* Un-recognize this pattern since it's probably different now. */
3923 INSN_CODE (insn) = -1;
3924 gcse_create_count++;
3925 }
3926 }
3927 }
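/* Purely for illustration (register numbers are hypothetical): given a
   recorded store

     (set (mem:SI (symbol_ref ("x"))) (reg:SI 105))

   and reaching register (reg:SI 200), the loop above first emits

     (set (reg:SI 200) (reg:SI 105))

   before the store and then rewrites the store itself into

     (set (mem:SI (symbol_ref ("x"))) (reg:SI 200))

   so the hoisted load of "x" always sees the current value in the
   reaching register.  */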
3928 \f
3929 /* Return true if the graph is too expensive to optimize. PASS is the
3930 optimization about to be performed. */
3931
3932 static bool
3933 is_too_expensive (const char *pass)
3934 {
3935 /* Trying to perform global optimizations on flow graphs which have
3936 a high connectivity will take a long time and is unlikely to be
3937 particularly useful.
3938
3939 In normal circumstances a cfg should have about twice as many
3940 edges as blocks. But we do not want to punish small functions
3941 which have a couple of switch statements. Rather than simply
3942 thresholding the number of blocks, we use something with more
3943 graceful degradation. */
3944 if (n_edges > 20000 + n_basic_blocks * 4)
3945 {
3946 warning (OPT_Wdisabled_optimization,
3947 "%s: %d basic blocks and %d edges/basic block",
3948 pass, n_basic_blocks, n_edges / n_basic_blocks);
3949
3950 return true;
3951 }
3952
3953 /* If allocating memory for the dataflow bitmaps would take up too much
3954 storage it's better just to disable the optimization. */
3955 if ((n_basic_blocks
3956 * SBITMAP_SET_SIZE (max_reg_num ())
3957 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
3958 {
3959 warning (OPT_Wdisabled_optimization,
3960 "%s: %d basic blocks and %d registers",
3961 pass, n_basic_blocks, max_reg_num ());
3962
3963 return true;
3964 }
3965
3966 return false;
3967 }
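/* A worked example of the edge threshold above: a function with 1000
   basic blocks is allowed 20000 + 1000 * 4 = 24000 edges, an average of
   24 per block, while a typical cfg has only about 2; with 100000
   blocks the limit is 420000 edges, about 4.2 per block, so the
   allowance per block degrades gracefully towards the constant
   factor 4.  */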
3968 \f
3969 /* All the passes implemented in this file. Each pass has its
3970 own gate and execute function, and at the end of the file a
3971 pass definition for passes.c.
3972
3973 We do not construct an accurate cfg in functions which call
3974 setjmp, so none of these passes runs if the function calls
3975 setjmp.
3976 FIXME: Should just handle setjmp via REG_SETJMP notes. */
3977
3978 static bool
3979 gate_rtl_pre (void)
3980 {
3981 return optimize > 0 && flag_gcse
3982 && !cfun->calls_setjmp
3983 && optimize_function_for_speed_p (cfun)
3984 && dbg_cnt (pre);
3985 }
3986
3987 static unsigned int
3988 execute_rtl_pre (void)
3989 {
3990 int changed;
3991 delete_unreachable_blocks ();
3992 df_analyze ();
3993 changed = one_pre_gcse_pass ();
3994 flag_rerun_cse_after_global_opts |= changed;
3995 if (changed)
3996 cleanup_cfg (0);
3997 return 0;
3998 }
3999
4000 static bool
4001 gate_rtl_hoist (void)
4002 {
4003 return optimize > 0 && flag_gcse
4004 && !cfun->calls_setjmp
4005 /* It does not make sense to run code hoisting unless we are optimizing
4006 for code size -- it rarely makes programs faster, and can make them
4007 bigger if we did PRE (when optimizing for space, we don't run PRE). */
4008 && optimize_function_for_size_p (cfun)
4009 && dbg_cnt (hoist);
4010 }
4011
4012 static unsigned int
4013 execute_rtl_hoist (void)
4014 {
4015 int changed;
4016 delete_unreachable_blocks ();
4017 df_analyze ();
4018 changed = one_code_hoisting_pass ();
4019 flag_rerun_cse_after_global_opts |= changed;
4020 if (changed)
4021 cleanup_cfg (0);
4022 return 0;
4023 }
4024
4025 struct rtl_opt_pass pass_rtl_pre =
4026 {
4027 {
4028 RTL_PASS,
4029 "rtl pre", /* name */
4030 gate_rtl_pre, /* gate */
4031 execute_rtl_pre, /* execute */
4032 NULL, /* sub */
4033 NULL, /* next */
4034 0, /* static_pass_number */
4035 TV_PRE, /* tv_id */
4036 PROP_cfglayout, /* properties_required */
4037 0, /* properties_provided */
4038 0, /* properties_destroyed */
4039 0, /* todo_flags_start */
4040 TODO_df_finish | TODO_verify_rtl_sharing |
4041 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
4042 }
4043 };
4044
4045 struct rtl_opt_pass pass_rtl_hoist =
4046 {
4047 {
4048 RTL_PASS,
4049 "hoist", /* name */
4050 gate_rtl_hoist, /* gate */
4051 execute_rtl_hoist, /* execute */
4052 NULL, /* sub */
4053 NULL, /* next */
4054 0, /* static_pass_number */
4055 TV_HOIST, /* tv_id */
4056 PROP_cfglayout, /* properties_required */
4057 0, /* properties_provided */
4058 0, /* properties_destroyed */
4059 0, /* todo_flags_start */
4060 TODO_df_finish | TODO_verify_rtl_sharing |
4061 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
4062 }
4063 };
4064
4065 #include "gt-gcse.h"