1 /* RTL dead store elimination.
2 Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
3
4 Contributed by Richard Sandiford <rsandifor@codesourcery.com>
5 and Kenneth Zadeck <zadeck@naturalbridge.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #undef BASELINE
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "hashtab.h"
29 #include "tm.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "flags.h"
35 #include "df.h"
36 #include "cselib.h"
37 #include "timevar.h"
38 #include "tree-pass.h"
39 #include "alloc-pool.h"
40 #include "alias.h"
41 #include "insn-config.h"
42 #include "expr.h"
43 #include "recog.h"
44 #include "dse.h"
45 #include "optabs.h"
46 #include "dbgcnt.h"
47
48 /* This file contains three techniques for performing Dead Store
49 Elimination (dse).
50
51 * The first technique performs dse locally on any base address. It
52 is based on cselib, which is a local value numbering technique.
53 This technique is local to a basic block but deals with fairly
54 general addresses.
55
56 * The second technique performs dse globally but is restricted to
57 base addresses that are either constant or are relative to the
58 frame_pointer.
59
60 * The third technique (which is only done after register allocation)
61 processes the spill slots. This differs from the second
62 technique because it takes advantage of the fact that spilling is
63 completely free from the effects of aliasing.
64
65 Logically, dse is a backwards dataflow problem. A store can be
66 deleted if it cannot be reached in the backward direction by any
67 use of the value being stored. However, the local technique uses a
68 forwards scan of the basic block because cselib requires that the
69 block be processed in that order.
70
71 The pass is logically broken into 7 steps:
72
73 0) Initialization.
74
75 1) The local algorithm, as well as scanning the insns for the two
76 global algorithms.
77
78 2) Analysis to see if the global algs are necessary. In the case
79 of stores based on a constant address, there must be at least two
80 stores to that address, to make it possible to delete some of the
81 stores. In the case of stores off of the frame or spill related
82 stores, only one store to an address is necessary because those
83 stores die at the end of the function.
84
85 3) Set up the global dataflow equations based on processing the
86 info parsed in the first step.
87
88 4) Solve the dataflow equations.
89
90 5) Delete the insns that the global analysis has indicated are
91 unnecessary.
92
93 6) Cleanup.
94
95 Step 1 uses cselib and canon_rtx to build the largest expression
96 possible for each address. This pass is a forwards pass through
97 each basic block. From the point of view of the global technique,
98 the first pass could examine a block in either direction. The
99 forwards ordering is to accommodate cselib.
100
101 We make a simplifying assumption: addresses fall into four broad
102 categories:
103
104 1) base has rtx_varies_p == false, offset is constant.
105 2) base has rtx_varies_p == false, offset is variable.
106 3) base has rtx_varies_p == true, offset is constant.
107 4) base has rtx_varies_p == true, offset is variable.
108
109 The local passes are able to process all 4 kinds of addresses. The
110 global pass only handles (1).
111
112 The global problem is formulated as follows:
113
114 A store, S1, to address A, where A is not relative to the stack
115 frame, can be eliminated if all paths from S1 to the end of the
116 function contain another store to A before a read of A.
117
118 If the address A is relative to the stack frame, a store S2 to A
119 can be eliminated if there are no paths from S2 that reach the
120 end of the function that read A before another store to A. In
121 addition, S2 can be deleted if there are paths from S2 to the
122 end of the function that have no reads or writes to A. This
123 second case allows stores to the stack frame to be deleted that
124 would otherwise die when the function returns. This cannot be
125 done if stores_off_frame_dead_at_return is false. See the
126 comment on that variable for when it is false.
127
128 The global problem is formulated as a backwards set union
129 dataflow problem where the stores are the gens and reads are the
130 kills. Set union problems are rare and require some special
131 handling given our representation of bitmaps. A straightforward
132 implementation requires a lot of bitmaps filled with 1s.
133 These are expensive and cumbersome in our bitmap formulation so
134 care has been taken to avoid large vectors filled with 1s. See
135 the comments in bb_info and in the dataflow confluence functions
136 for details.
137
138 There are two places for further enhancements to this algorithm:
139
140 1) The original dse which was embedded in a pass called flow also
141 did local address forwarding. For example in
142
143 A <- r100
144 ... <- A
145
146 flow would replace the right hand side of the second insn with a
147 reference to r100. Most of the information is available to add this
148 to this pass. It has not been done because it is a lot of work in
149 the case that either r100 is assigned to between the first and
150 second insn and/or the second insn is a load of part of the value
151 stored by the first insn.
152
153 insn 5 in gcc.c-torture/compile/990203-1.c simple case.
154 insn 15 in gcc.c-torture/execute/20001017-2.c simple case.
155 insn 25 in gcc.c-torture/execute/20001026-1.c simple case.
156 insn 44 in gcc.c-torture/execute/20010910-1.c simple case.
157
158 2) The cleaning up of spill code is quite profitable. It currently
159 depends on reading tea leaves and chicken entrails left by reload.
160 This pass depends on reload creating a singleton alias set for each
161 spill slot and telling the next dse pass which of these alias sets
162 are the singletons. Rather than analyze the addresses of the
163 spills, dse's spill processing just does analysis of the loads and
164 stores that use those alias sets. There are three cases where this
165 falls short:
166
167 a) Reload sometimes creates the slot for one mode of access, and
168 then inserts loads and/or stores for a smaller mode. In this
169 case, the current code just punts on the slot. The proper thing
170 to do is to back out and use one bit vector position for each
171 byte of the entity associated with the slot. This depends on
172 KNOWING that reload always generates the accesses for each of the
173 bytes in some canonical (read that easy to understand several
174 passes after reload happens) way.
175
176 b) Reload sometimes decides that the spill slot it allocated was not
177 large enough for the mode and goes back and allocates more slots
178 with the same mode and alias set. The backout in this case is a
179 little more graceful than (a). In this case the slot is unmarked
180 as being a spill slot and if the final address comes out to be based
181 off the frame pointer, the global algorithm handles this slot.
182
183 c) For any pass that may prespill, there is currently no
184 mechanism to tell the dse pass that the slot being used has the
185 special properties that reload uses. It may be that all that is
186 required is to have those passes make the same calls that reload
187 does, assuming that the alias sets can be manipulated in the same
188 way. */
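/* A minimal illustrative sketch (not part of the pass; the name is
   hypothetical): the shape of the gen/kill transfer function that the
   global equations of steps 3 and 4 use, written over plain word-wide
   bit sets instead of the sparse bitmaps used below.  Each bit stands
   for one global (group, offset) position.  A store reaches the top of
   a block if it is generated in the block, or if it reaches the bottom
   of the block and no read in the block kills it.  */
#if 0
static unsigned long
sketch_transfer (unsigned long out, unsigned long gen, unsigned long kill)
{
  return gen | (out & ~kill);
}
#endif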
189
190 /* There are limits to the size of constant offsets we model for the
191 global problem. There are certainly test cases that exceed this
192 limit; however, it is unlikely that there are important programs
193 that really have constant offsets this large. */
194 #define MAX_OFFSET (64 * 1024)
195
196
197 static bitmap scratch = NULL;
198 struct insn_info;
199
200 /* This structure holds information about a candidate store. */
201 struct store_info
202 {
203
204 /* False means this is a clobber. */
205 bool is_set;
206
207 /* The id of the mem group of the base address. If rtx_varies_p is
208 true, this is -1. Otherwise, it is the index into the group
209 table. */
210 int group_id;
211
212 /* This is the cselib value. */
213 cselib_val *cse_base;
214
215 /* The canonized mem. */
216 rtx mem;
217
218 /* The result of get_addr on mem. */
219 rtx mem_addr;
220
221 /* If this is non-zero, it is the alias set of a spill location. */
222 alias_set_type alias_set;
223
224 /* The offset of the first byte and of the byte after the last
225 byte associated with the operation. */
226 int begin, end;
227
228 /* A bitmask as wide as the number of bytes in the word that
229 contains a 1 if the byte may be needed. The store is unused if
230 all of the bits are 0. */
231 long positions_needed;
232
233 /* The next store info for this insn. */
234 struct store_info *next;
235
236 /* The right hand side of the store. This is used if there is a
237 subsequent reload of the mem's address somewhere later in the
238 basic block. */
239 rtx rhs;
240 };
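/* An illustrative sketch (hypothetical helpers, not part of the pass)
   of the positions_needed protocol: a store of WIDTH bytes starts with
   its low WIDTH bits set; a later store covering bytes [cover_begin,
   cover_end) of it clears the matching bits; the store is dead once no
   bits remain.  record_store below does these same updates, and its
   gcc_assert guarantees WIDTH is smaller than the number of bits in a
   long, which this sketch also assumes.  */
#if 0
static long
sketch_initial_mask (int width)
{
  return (1L << width) - 1;	/* Every byte still needed.  */
}

static long
sketch_shadow_bytes (long positions_needed, int store_begin,
		     int cover_begin, int cover_end)
{
  int i;
  for (i = cover_begin; i < cover_end; i++)
    positions_needed &= ~(1L << (i - store_begin));
  return positions_needed;	/* Zero means the store is dead.  */
}
#endif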
241
242 typedef struct store_info *store_info_t;
243 static alloc_pool cse_store_info_pool;
244 static alloc_pool rtx_store_info_pool;
245
246 /* This structure holds information about a load. These are only
247 built for rtx bases. */
248 struct read_info
249 {
250 /* The id of the mem group of the base address. */
251 int group_id;
252
253 /* If this is non-zero, it is the alias set of a spill location. */
254 alias_set_type alias_set;
255
256 /* The offset of the first byte and of the byte after the last
257 byte associated with the operation. If begin == end == 0, the
258 read did not have a constant offset. */
259 int begin, end;
260
261 /* The mem being read. */
262 rtx mem;
263
264 /* The next read_info for this insn. */
265 struct read_info *next;
266 };
267 typedef struct read_info *read_info_t;
268 static alloc_pool read_info_pool;
269
270
271 /* One of these records is created for each insn. */
272
273 struct insn_info
274 {
275 /* Set true if the insn contains a store but the insn itself cannot
276 be deleted. This is set if the insn is a parallel and there is
277 more than one non-dead output or if the insn is in some way
278 volatile. */
279 bool cannot_delete;
280
281 /* This field is only used by the global algorithm. It is set true
282 if the insn contains any read of mem except for a read of a category (1) address. This is
283 also set if the insn is a call or has a clobber mem. If the insn
284 contains a wild read, the use_rec will be null. */
285 bool wild_read;
286
287 /* This field is only used for the processing of const functions.
288 These functions cannot read memory, but they can read the stack
289 because that is where they may get their parms. We need to be
290 this conservative because, like the store motion pass, we don't
291 consider CALL_INSN_FUNCTION_USAGE when processing call insns.
292 Moreover, we need to distinguish two cases:
293 1. Before reload (register elimination), the stores related to
294 outgoing arguments are stack pointer based and thus deemed
295 of non-constant base in this pass. This requires special
296 handling but also means that the frame pointer based stores
297 need not be killed upon encountering a const function call.
298 2. After reload, the stores related to outgoing arguments can be
299 either stack pointer or hard frame pointer based. This means
300 that we have no other choice than also killing all the frame
301 pointer based stores upon encountering a const function call.
302 This field is set after reload for const function calls. Having
303 this set is less severe than a wild read, it just means that all
304 the frame related stores are killed rather than all the stores. */
305 bool frame_read;
306
307 /* This field is only used for the processing of const functions.
308 It is set if the insn may contain a stack pointer based store. */
309 bool stack_pointer_based;
310
311 /* This is true if any of the sets within the store contains a
312 cselib base. Such stores can only be deleted by the local
313 algorithm. */
314 bool contains_cselib_groups;
315
316 /* The insn. */
317 rtx insn;
318
319 /* The list of mem sets or mem clobbers that are contained in this
320 insn. If the insn is deletable, it contains only one mem set.
321 But it could also contain clobbers. Insns that contain more than
322 one mem set are not deletable, but each of those mems is here in
323 order to provide info to delete other insns. */
324 store_info_t store_rec;
325
326 /* The linked list of mem uses in this insn. Only the reads from
327 rtx bases are listed here. The reads to cselib bases are
328 completely processed during the first scan and so are never
329 created. */
330 read_info_t read_rec;
331
332 /* The prev insn in the basic block. */
333 struct insn_info * prev_insn;
334
335 /* The linked list of insns that are in consideration for removal in
336 the forwards pass through the basic block. This pointer may be
337 trash as it is not cleared when a wild read occurs. The only
338 time it is guaranteed to be correct is when the traversal starts
339 at active_local_stores. */
340 struct insn_info * next_local_store;
341 };
342
343 typedef struct insn_info *insn_info_t;
344 static alloc_pool insn_info_pool;
345
346 /* The linked list of stores that are under consideration in this
347 basic block. */
348 static insn_info_t active_local_stores;
349
350 struct bb_info
351 {
352
353 /* Pointer to the insn info for the last insn in the block. These
354 are linked so this is how all of the insns are reached. During
355 scanning this is the current insn being scanned. */
356 insn_info_t last_insn;
357
358 /* The info for the global dataflow problem. */
359
360
361 /* This is set if the transfer function should AND in the wild_read
362 bitmap before applying the kill and gen sets. That vector knocks
363 out most of the bits in the bitmap and thus speeds up the
364 operations. */
365 bool apply_wild_read;
366
367 /* The set of store positions that exist in this block before a wild read. */
368 bitmap gen;
369
370 /* The set of load positions that exist in this block above
371 (i.e. before) a store to the same position. */
372 bitmap kill;
373
374 /* The set of stores that reach the top of the block without being
375 killed by a read.
376
377 The in set is not represented if it is all ones. Note that this is
378 what the bitvector should logically be initialized to for a set
379 intersection problem. However, like the kill set, this is too
380 expensive. So initially, the in set will only be created for the
381 exit block and any block that contains a wild read. */
382 bitmap in;
383
384 /* The set of stores that reach the bottom of the block from its
385 successors.
386
387 The out set is not represented if it is all ones. Note that this is
388 what the bitvector should logically be initialized to for a set
389 intersection problem. However, like the kill and in set, this is
390 too expensive. So what is done is that the confluence operator
391 just initializes the vector from one of the out sets of the
392 successors of the block. */
393 bitmap out;
394 };
395
396 typedef struct bb_info *bb_info_t;
397 static alloc_pool bb_info_pool;
398
399 /* Table to hold all bb_infos. */
400 static bb_info_t *bb_table;
401
402 /* There is a group_info for each rtx base that is used to reference
403 memory. There are not many rtx bases because they are
404 very limited in scope. */
405
406 struct group_info
407 {
408 /* The actual base of the address. */
409 rtx rtx_base;
410
411 /* The sequential id of the base. This allows us to have a
412 canonical ordering of these that is not based on addresses. */
413 int id;
414
415 /* A mem wrapped around the base pointer for the group in order to
416 do read dependency. */
417 rtx base_mem;
418
419 /* Canonized version of base_mem, most likely the same thing. */
420 rtx canon_base_mem;
421
422 /* These two sets of two bitmaps are used to keep track of how many
423 stores are actually referencing that position from this base. We
424 only do this for rtx bases as this will be used to assign
425 positions in the bitmaps for the global problem. Bit N is set in
426 store1 on the first store for offset N. Bit N is set in store2
427 for the second store to offset N. This is all we need since we
428 only care about offsets that have two or more stores for them.
429
430 The "_n" suffix is for offsets less than 0 and the "_p" suffix is
431 for 0 and greater offsets.
432
433 There is one special case here: for stores into the stack frame,
434 we will OR store1 into store2 before deciding which stores to look
435 at globally. This is because stores to the stack frame that have
436 no other reads before the end of the function can also be
437 deleted. */
438 bitmap store1_n, store1_p, store2_n, store2_p;
439
440 /* The positions in this bitmap have the same assignments as the in,
441 out, gen and kill bitmaps. This bitmap is all zeros except for
442 the positions that are occupied by stores for this group. */
443 bitmap group_kill;
444
445 /* True if there are any positions that are to be processed
446 globally. */
447 bool process_globally;
448
449 /* True if the base of this group is either the frame_pointer or
450 hard_frame_pointer. */
451 bool frame_related;
452
453 /* The offset_map is used to map the offsets from this base into
454 positions in the global bitmaps. It is only created after all of
455 the stores have been scanned and we know which ones we
456 care about. */
457 int *offset_map_n, *offset_map_p;
458 int offset_map_size_n, offset_map_size_p;
459 };
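/* An illustrative sketch (hypothetical name, not part of the pass):
   the store1/store2 pair acts as a saturating two-state counter per
   offset.  The first store to an offset sets the bit in store1, the
   second sets it in store2, and later stores change nothing; the
   offsets worth processing globally are then read off store2 (ORed
   with store1 for frame related groups, as described above).
   set_usage_bits below does the same thing with sparse bitmaps.  */
#if 0
static void
sketch_count_store (unsigned long *store1, unsigned long *store2, int pos)
{
  if (*store1 & (1UL << pos))
    *store2 |= 1UL << pos;
  else
    *store1 |= 1UL << pos;
}
#endif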
460 typedef struct group_info *group_info_t;
461 typedef const struct group_info *const_group_info_t;
462 static alloc_pool rtx_group_info_pool;
463
464 /* Tables of group_info structures, hashed by base value. */
465 static htab_t rtx_group_table;
466
467 /* Index into the rtx_group_vec. */
468 static int rtx_group_next_id;
469
470 DEF_VEC_P(group_info_t);
471 DEF_VEC_ALLOC_P(group_info_t,heap);
472
473 static VEC(group_info_t,heap) *rtx_group_vec;
474
475
476 /* This structure holds the set of changes that are being deferred
477 when removing a read operation. See replace_read. */
478 struct deferred_change
479 {
480
481 /* The mem that is being replaced. */
482 rtx *loc;
483
484 /* The reg it is being replaced with. */
485 rtx reg;
486
487 struct deferred_change *next;
488 };
489
490 typedef struct deferred_change *deferred_change_t;
491 static alloc_pool deferred_change_pool;
492
493 static deferred_change_t deferred_change_list = NULL;
494
495 /* This is used to hold the alias sets of spill variables. Since
496 these are never aliased and there may be a lot of them, it makes
497 sense to treat them specially. This bitvector is only allocated in
498 calls from dse_record_singleton_alias_set which currently is only
499 made during reload1. So when dse is called before reload this
500 mechanism does nothing. */
501
502 static bitmap clear_alias_sets = NULL;
503
504 /* The set of clear_alias_sets that have been disqualified because
505 there are loads or stores using a different mode than the alias set
506 was registered with. */
507 static bitmap disqualified_clear_alias_sets = NULL;
508
509 /* The group that holds all of the clear_alias_sets. */
510 static group_info_t clear_alias_group;
511
512 /* The modes of the clear_alias_sets. */
513 static htab_t clear_alias_mode_table;
514
515 /* Hash table element to look up the mode for an alias set. */
516 struct clear_alias_mode_holder
517 {
518 alias_set_type alias_set;
519 enum machine_mode mode;
520 };
521
522 static alloc_pool clear_alias_mode_pool;
523
524 /* This is true except for two cases:
525 (1) current_function_stdarg -- i.e. we cannot do this
526 for vararg functions because they play games with the frame.
527 (2) In Ada, it is sometimes not safe to assume that any stores
528 based off the stack frame go dead at the exit to a function. */
529 static bool stores_off_frame_dead_at_return;
530
531 /* Counters for stats. */
532 static int globally_deleted;
533 static int locally_deleted;
534 static int spill_deleted;
535
536 static bitmap all_blocks;
537
538 /* The number of bits used in the global bitmaps. */
539 static unsigned int current_position;
540
541
542 static bool gate_dse (void);
543 static bool gate_dse1 (void);
544 static bool gate_dse2 (void);
545
546 \f
547 /*----------------------------------------------------------------------------
548 Zeroth step.
549
550 Initialization.
551 ----------------------------------------------------------------------------*/
552
553 /* Hashtable callbacks for the clear_alias_mode_table, which maps
554 spill-slot alias sets to the modes they were registered with. */
555
556 static int
557 clear_alias_mode_eq (const void *p1, const void *p2)
558 {
559 const struct clear_alias_mode_holder * h1
560 = (const struct clear_alias_mode_holder *) p1;
561 const struct clear_alias_mode_holder * h2
562 = (const struct clear_alias_mode_holder *) p2;
563 return h1->alias_set == h2->alias_set;
564 }
565
566
567 static hashval_t
568 clear_alias_mode_hash (const void *p)
569 {
570 const struct clear_alias_mode_holder *holder
571 = (const struct clear_alias_mode_holder *) p;
572 return holder->alias_set;
573 }
574
575
576 /* Find the entry associated with ALIAS_SET. */
577
578 static struct clear_alias_mode_holder *
579 clear_alias_set_lookup (alias_set_type alias_set)
580 {
581 struct clear_alias_mode_holder tmp_holder;
582 void **slot;
583
584 tmp_holder.alias_set = alias_set;
585 slot = htab_find_slot (clear_alias_mode_table, &tmp_holder, NO_INSERT);
586 gcc_assert (*slot);
587
588 return *slot;
589 }
590
591
592 /* Hashtable callbacks for maintaining the "bases" field of
593 store_group_info, given that the addresses are function invariants. */
594
595 static int
596 invariant_group_base_eq (const void *p1, const void *p2)
597 {
598 const_group_info_t gi1 = (const_group_info_t) p1;
599 const_group_info_t gi2 = (const_group_info_t) p2;
600 return rtx_equal_p (gi1->rtx_base, gi2->rtx_base);
601 }
602
603
604 static hashval_t
605 invariant_group_base_hash (const void *p)
606 {
607 const_group_info_t gi = (const_group_info_t) p;
608 int do_not_record;
609 return hash_rtx (gi->rtx_base, Pmode, &do_not_record, NULL, false);
610 }
611
612
613 /* Get the GROUP for BASE. Add a new group if it is not there. */
614
615 static group_info_t
616 get_group_info (rtx base)
617 {
618 struct group_info tmp_gi;
619 group_info_t gi;
620 void **slot;
621
622 if (base)
623 {
624 /* Find the store_base_info structure for BASE, creating a new one
625 if necessary. */
626 tmp_gi.rtx_base = base;
627 slot = htab_find_slot (rtx_group_table, &tmp_gi, INSERT);
628 gi = (group_info_t) *slot;
629 }
630 else
631 {
632 if (!clear_alias_group)
633 {
634 clear_alias_group = gi = pool_alloc (rtx_group_info_pool);
635 memset (gi, 0, sizeof (struct group_info));
636 gi->id = rtx_group_next_id++;
637 gi->store1_n = BITMAP_ALLOC (NULL);
638 gi->store1_p = BITMAP_ALLOC (NULL);
639 gi->store2_n = BITMAP_ALLOC (NULL);
640 gi->store2_p = BITMAP_ALLOC (NULL);
641 gi->group_kill = BITMAP_ALLOC (NULL);
642 gi->process_globally = false;
643 gi->offset_map_size_n = 0;
644 gi->offset_map_size_p = 0;
645 gi->offset_map_n = NULL;
646 gi->offset_map_p = NULL;
647 VEC_safe_push (group_info_t, heap, rtx_group_vec, gi);
648 }
649 return clear_alias_group;
650 }
651
652 if (gi == NULL)
653 {
654 *slot = gi = pool_alloc (rtx_group_info_pool);
655 gi->rtx_base = base;
656 gi->id = rtx_group_next_id++;
657 gi->base_mem = gen_rtx_MEM (QImode, base);
658 gi->canon_base_mem = canon_rtx (gi->base_mem);
659 gi->store1_n = BITMAP_ALLOC (NULL);
660 gi->store1_p = BITMAP_ALLOC (NULL);
661 gi->store2_n = BITMAP_ALLOC (NULL);
662 gi->store2_p = BITMAP_ALLOC (NULL);
663 gi->group_kill = BITMAP_ALLOC (NULL);
664 gi->process_globally = false;
665 gi->frame_related =
666 (base == frame_pointer_rtx) || (base == hard_frame_pointer_rtx);
667 gi->offset_map_size_n = 0;
668 gi->offset_map_size_p = 0;
669 gi->offset_map_n = NULL;
670 gi->offset_map_p = NULL;
671 VEC_safe_push (group_info_t, heap, rtx_group_vec, gi);
672 }
673
674 return gi;
675 }
676
677
678 /* Initialization of data structures. */
679
680 static void
681 dse_step0 (void)
682 {
683 locally_deleted = 0;
684 globally_deleted = 0;
685 spill_deleted = 0;
686
687 scratch = BITMAP_ALLOC (NULL);
688
689 rtx_store_info_pool
690 = create_alloc_pool ("rtx_store_info_pool",
691 sizeof (struct store_info), 100);
692 read_info_pool
693 = create_alloc_pool ("read_info_pool",
694 sizeof (struct read_info), 100);
695 insn_info_pool
696 = create_alloc_pool ("insn_info_pool",
697 sizeof (struct insn_info), 100);
698 bb_info_pool
699 = create_alloc_pool ("bb_info_pool",
700 sizeof (struct bb_info), 100);
701 rtx_group_info_pool
702 = create_alloc_pool ("rtx_group_info_pool",
703 sizeof (struct group_info), 100);
704 deferred_change_pool
705 = create_alloc_pool ("deferred_change_pool",
706 sizeof (struct deferred_change), 10);
707
708 rtx_group_table = htab_create (11, invariant_group_base_hash,
709 invariant_group_base_eq, NULL);
710
711 bb_table = XCNEWVEC (bb_info_t, last_basic_block);
712 rtx_group_next_id = 0;
713
714 stores_off_frame_dead_at_return =
715 (!(TREE_CODE (TREE_TYPE (current_function_decl)) == FUNCTION_TYPE
716 && (TYPE_RETURNS_STACK_DEPRESSED (TREE_TYPE (current_function_decl)))))
717 && (!current_function_stdarg);
718
719 init_alias_analysis ();
720
721 if (clear_alias_sets)
722 clear_alias_group = get_group_info (NULL);
723 else
724 clear_alias_group = NULL;
725 }
726
727
728 \f
729 /*----------------------------------------------------------------------------
730 First step.
731
732 Scan all of the insns. Any random ordering of the blocks is fine.
733 Each block is scanned in forward order to accommodate cselib which
734 is used to remove stores with non-constant bases.
735 ----------------------------------------------------------------------------*/
736
737 /* Delete all of the store_info recs from INSN_INFO. */
738
739 static void
740 free_store_info (insn_info_t insn_info)
741 {
742 store_info_t store_info = insn_info->store_rec;
743 while (store_info)
744 {
745 store_info_t next = store_info->next;
746 if (store_info->cse_base)
747 pool_free (cse_store_info_pool, store_info);
748 else
749 pool_free (rtx_store_info_pool, store_info);
750 store_info = next;
751 }
752
753 insn_info->cannot_delete = true;
754 insn_info->contains_cselib_groups = false;
755 insn_info->store_rec = NULL;
756 }
757
758
759 struct insn_size {
760 int size;
761 rtx insn;
762 };
763
764
765 /* Add an insn to do the add inside X if it is a
766 PRE/POST-INC/DEC/MODIFY. D is a structure containing the insn and
767 the size of the mode of the MEM that this is inside of. */
768
769 static int
770 replace_inc_dec (rtx *r, void *d)
771 {
772 rtx x = *r;
773 struct insn_size *data = (struct insn_size *)d;
774 switch (GET_CODE (x))
775 {
776 case PRE_INC:
777 case POST_INC:
778 {
779 rtx r1 = XEXP (x, 0);
780 rtx c = gen_int_mode (data->size, Pmode);
781 add_insn_before (data->insn,
782 gen_rtx_SET (Pmode, r1,
783 gen_rtx_PLUS (Pmode, r1, c)),
784 NULL);
785 return -1;
786 }
787
788 case PRE_DEC:
789 case POST_DEC:
790 {
791 rtx r1 = XEXP (x, 0);
792 rtx c = gen_int_mode (-data->size, Pmode);
793 add_insn_before (data->insn,
794 gen_rtx_SET (Pmode, r1,
795 gen_rtx_PLUS (Pmode, r1, c)),
796 NULL);
797 return -1;
798 }
799
800 case PRE_MODIFY:
801 case POST_MODIFY:
802 {
803 /* We can reuse the add because we are about to delete the
804 insn that contained it. */
805 rtx add = XEXP (x, 0);
806 rtx r1 = XEXP (add, 0);
807 add_insn_before (data->insn,
808 gen_rtx_SET (Pmode, r1, add), NULL);
809 return -1;
810 }
811
812 default:
813 return 0;
814 }
815 }
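/* For example (an illustrative RTL sketch, not actual dump output):
   given a 4-byte access, replace_inc_dec rewrites

       (mem:SI (post_inc (reg r1)))

   by emitting

       (set (reg r1) (plus (reg r1) (const_int 4)))

   before the doomed insn.  Since that insn is about to be deleted,
   the placement of the add relative to it does not matter; what
   matters is that the side effect on r1 survives the deletion.  */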
816
817
818 /* If X is a MEM, check the address to see if it is PRE/POST-INC/DEC/MODIFY
819 and generate an add to replace that. */
820
821 static int
822 replace_inc_dec_mem (rtx *r, void *d)
823 {
824 rtx x = *r;
825 if (GET_CODE (x) == MEM)
826 {
827 struct insn_size data;
828
829 data.size = GET_MODE_SIZE (GET_MODE (x));
830 data.insn = (rtx)d;
831
832 for_each_rtx (&XEXP (x, 0), replace_inc_dec, &data);
833
834 return -1;
835 }
836 return 0;
837 }
838
839 /* Before we delete INSN, make sure that the auto inc/dec, if it is
840 there, is split into a separate insn. */
841
842 static void
843 check_for_inc_dec (rtx insn)
844 {
845 rtx note = find_reg_note (insn, REG_INC, NULL_RTX);
846 if (note)
847 for_each_rtx (&insn, replace_inc_dec_mem, insn);
848 }
849
850
851 /* Delete the insn and free all of the fields inside INSN_INFO. */
852
853 static void
854 delete_dead_store_insn (insn_info_t insn_info)
855 {
856 read_info_t read_info;
857
858 if (!dbg_cnt (dse))
859 return;
860
861 check_for_inc_dec (insn_info->insn);
862 if (dump_file)
863 {
864 fprintf (dump_file, "Locally deleting insn %d ",
865 INSN_UID (insn_info->insn));
866 if (insn_info->store_rec->alias_set)
867 fprintf (dump_file, "alias set %d\n",
868 (int) insn_info->store_rec->alias_set);
869 else
870 fprintf (dump_file, "\n");
871 }
872
873 free_store_info (insn_info);
874 read_info = insn_info->read_rec;
875
876 while (read_info)
877 {
878 read_info_t next = read_info->next;
879 pool_free (read_info_pool, read_info);
880 read_info = next;
881 }
882 insn_info->read_rec = NULL;
883
884 delete_insn (insn_info->insn);
885 locally_deleted++;
886 insn_info->insn = NULL;
887
888 insn_info->wild_read = false;
889 }
890
891
892 /* Set the store* bitmaps and offset_map_size* fields in GROUP based on
893 OFFSET and WIDTH. */
894
895 static void
896 set_usage_bits (group_info_t group, HOST_WIDE_INT offset, HOST_WIDE_INT width)
897 {
898 HOST_WIDE_INT i;
899
900 if ((offset > -MAX_OFFSET) && (offset < MAX_OFFSET))
901 for (i = offset; i < offset + width; i++)
902 {
903 bitmap store1;
904 bitmap store2;
905 int ai;
906 if (i < 0)
907 {
908 store1 = group->store1_n;
909 store2 = group->store2_n;
910 ai = -i;
911 }
912 else
913 {
914 store1 = group->store1_p;
915 store2 = group->store2_p;
916 ai = i;
917 }
918
919 if (bitmap_bit_p (store1, ai))
920 bitmap_set_bit (store2, ai);
921 else
922 {
923 bitmap_set_bit (store1, ai);
924 if (i < 0)
925 {
926 if (group->offset_map_size_n < ai)
927 group->offset_map_size_n = ai;
928 }
929 else
930 {
931 if (group->offset_map_size_p < ai)
932 group->offset_map_size_p = ai;
933 }
934 }
935 }
936 }
937
938
939 /* Set the BB_INFO so that the last insn is marked as a wild read. */
940
941 static void
942 add_wild_read (bb_info_t bb_info)
943 {
944 insn_info_t insn_info = bb_info->last_insn;
945 read_info_t *ptr = &insn_info->read_rec;
946
947 while (*ptr)
948 {
949 read_info_t next = (*ptr)->next;
950 if ((*ptr)->alias_set == 0)
951 {
952 pool_free (read_info_pool, *ptr);
953 *ptr = next;
954 }
955 else
956 ptr = &(*ptr)->next;
957 }
958 insn_info->wild_read = true;
959 active_local_stores = NULL;
960 }
961
962
963 /* Return true if X is a constant or one of the registers that behave
964 as a constant over the life of a function. This is equivalent to
965 !rtx_varies_p for memory addresses. */
966
967 static bool
968 const_or_frame_p (rtx x)
969 {
970 switch (GET_CODE (x))
971 {
972 case MEM:
973 return MEM_READONLY_P (x);
974
975 case CONST:
976 case CONST_INT:
977 case CONST_DOUBLE:
978 case CONST_VECTOR:
979 case SYMBOL_REF:
980 case LABEL_REF:
981 return true;
982
983 case REG:
984 /* Note that we have to test for the actual rtx used for the frame
985 and arg pointers and not just the register number in case we have
986 eliminated the frame and/or arg pointer and are using it
987 for pseudos. */
988 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
989 /* The arg pointer varies if it is not a fixed register. */
990 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])
991 || x == pic_offset_table_rtx)
992 return true;
993 return false;
994
995 default:
996 return false;
997 }
998 }
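/* For example, (symbol_ref "x"), (label_ref ...), a MEM_READONLY_P
   mem, frame_pointer_rtx, and arg_pointer_rtx (when it is a fixed
   register) all qualify above; a pseudo register does not.  */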
999
1000 /* Take all reasonable action to put the address of MEM into the form
1001 that we can do analysis on.
1002
1003 The gold standard is to get the address into the form: address +
1004 OFFSET where address is something that rtx_varies_p considers a
1005 constant. When we can get the address in this form, we can do
1006 global analysis on it. Note that for constant bases, address is
1007 not actually returned, only the group_id. The address can be
1008 obtained from that.
1009
1010 If that fails, we try cselib to get a value we can at least use
1011 locally. If that fails we return false.
1012
1013 The GROUP_ID is set to -1 for cselib bases and the index of the
1014 group for non_varying bases.
1015
1016 FOR_READ is true if this is a mem read and false if not. */
1017
1018 static bool
1019 canon_address (rtx mem,
1020 alias_set_type *alias_set_out,
1021 int *group_id,
1022 HOST_WIDE_INT *offset,
1023 cselib_val **base)
1024 {
1025 rtx mem_address = XEXP (mem, 0);
1026 rtx expanded_address, address;
1027 /* Make sure that cselib has initialized all of the operands of
1028 the address before asking it to do the subst. */
1029
1030 if (clear_alias_sets)
1031 {
1032 /* If this is a spill, do not do any further processing. */
1033 alias_set_type alias_set = MEM_ALIAS_SET (mem);
1034 if (dump_file)
1035 fprintf (dump_file, "found alias set %d\n", (int) alias_set);
1036 if (bitmap_bit_p (clear_alias_sets, alias_set))
1037 {
1038 struct clear_alias_mode_holder *entry
1039 = clear_alias_set_lookup (alias_set);
1040
1041 /* If the modes do not match, we cannot process this set. */
1042 if (entry->mode != GET_MODE (mem))
1043 {
1044 if (dump_file)
1045 fprintf (dump_file,
1046 "disqualifying alias set %d, (%s) != (%s)\n",
1047 (int) alias_set, GET_MODE_NAME (entry->mode),
1048 GET_MODE_NAME (GET_MODE (mem)));
1049
1050 bitmap_set_bit (disqualified_clear_alias_sets, alias_set);
1051 return false;
1052 }
1053
1054 *alias_set_out = alias_set;
1055 *group_id = clear_alias_group->id;
1056 return true;
1057 }
1058 }
1059
1060 *alias_set_out = 0;
1061
1062 cselib_lookup (mem_address, Pmode, 1);
1063
1064 if (dump_file)
1065 {
1066 fprintf (dump_file, " mem: ");
1067 print_inline_rtx (dump_file, mem_address, 0);
1068 fprintf (dump_file, "\n");
1069 }
1070
1071 /* Use cselib to replace all of the reg references with the full
1072 expression. This will take care of the case where we have
1073
1074 r_x = base + offset;
1075 val = *r_x;
1076
1077 by making it into
1078
1079 val = *(base + offset);
1080 */
1081
1082 expanded_address = cselib_expand_value_rtx (mem_address, scratch, 5);
1083
1084 /* If this fails, just go with the mem_address. */
1085 if (!expanded_address)
1086 expanded_address = mem_address;
1087
1088 /* Split the address into canonical BASE + OFFSET terms. */
1089 address = canon_rtx (expanded_address);
1090
1091 *offset = 0;
1092
1093 if (dump_file)
1094 {
1095 fprintf (dump_file, "\n after cselib_expand address: ");
1096 print_inline_rtx (dump_file, expanded_address, 0);
1097 fprintf (dump_file, "\n");
1098
1099 fprintf (dump_file, "\n after canon_rtx address: ");
1100 print_inline_rtx (dump_file, address, 0);
1101 fprintf (dump_file, "\n");
1102 }
1103
1104 if (GET_CODE (address) == CONST)
1105 address = XEXP (address, 0);
1106
1107 if (GET_CODE (address) == PLUS && GET_CODE (XEXP (address, 1)) == CONST_INT)
1108 {
1109 *offset = INTVAL (XEXP (address, 1));
1110 address = XEXP (address, 0);
1111 }
1112
1113 if (const_or_frame_p (address))
1114 {
1115 group_info_t group = get_group_info (address);
1116
1117 if (dump_file)
1118 fprintf (dump_file, " gid=%d offset=%d \n", group->id, (int)*offset);
1119 *base = NULL;
1120 *group_id = group->id;
1121 }
1122 else
1123 {
1124 *base = cselib_lookup (address, Pmode, true);
1125 *group_id = -1;
1126
1127 if (*base == NULL)
1128 {
1129 if (dump_file)
1130 fprintf (dump_file, " no cselib val - should be a wild read.\n");
1131 return false;
1132 }
1133 if (dump_file)
1134 fprintf (dump_file, " varying cselib base=%d offset = %d\n",
1135 (*base)->value, (int)*offset);
1136 }
1137 return true;
1138 }
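/* For example (illustrative, not actual dump output): for

       (mem:SI (plus (reg/f fp) (const_int -8)))

   the expanded and canonicalized address has a frame pointer base, so
   canon_address returns true with *group_id set to the fp group and
   *offset = -8, leaving *base null; whereas for a load through a
   pseudo, (mem:SI (reg 100)), it returns *group_id = -1 and the
   cselib value of r100 in *base.  */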
1139
1140
1141 /* Clear the rhs field from the active_local_stores array. */
1142
1143 static void
1144 clear_rhs_from_active_local_stores (void)
1145 {
1146 insn_info_t ptr = active_local_stores;
1147
1148 while (ptr)
1149 {
1150 store_info_t store_info = ptr->store_rec;
1151 /* Skip the clobbers. */
1152 while (!store_info->is_set)
1153 store_info = store_info->next;
1154
1155 store_info->rhs = NULL;
1156
1157 ptr = ptr->next_local_store;
1158 }
1159 }
1160
1161
1162 /* BODY is an instruction pattern that belongs to INSN. Return 1 if
1163 there is a candidate store, after adding it to the appropriate
1164 local store group if so. */
1165
1166 static int
1167 record_store (rtx body, bb_info_t bb_info)
1168 {
1169 rtx mem;
1170 HOST_WIDE_INT offset = 0;
1171 HOST_WIDE_INT width = 0;
1172 alias_set_type spill_alias_set;
1173 insn_info_t insn_info = bb_info->last_insn;
1174 store_info_t store_info = NULL;
1175 int group_id;
1176 cselib_val *base = NULL;
1177 insn_info_t ptr, last;
1178 bool store_is_unused;
1179
1180 if (GET_CODE (body) != SET && GET_CODE (body) != CLOBBER)
1181 return 0;
1182
1183 /* If this is not used, then this cannot be used to keep the insn
1184 from being deleted. On the other hand, it does provide something
1185 that can be used to prove that another store is dead. */
1186 store_is_unused
1187 = (find_reg_note (insn_info->insn, REG_UNUSED, body) != NULL);
1188
1189 /* Check whether that value is a suitable memory location. */
1190 mem = SET_DEST (body);
1191 if (!MEM_P (mem))
1192 {
1193 /* If the set or clobber is unused, then it does not affect our
1194 ability to get rid of the entire insn. */
1195 if (!store_is_unused)
1196 insn_info->cannot_delete = true;
1197 return 0;
1198 }
1199
1200 /* At this point we know mem is a mem. */
1201 if (GET_MODE (mem) == BLKmode)
1202 {
1203 if (GET_CODE (XEXP (mem, 0)) == SCRATCH)
1204 {
1205 if (dump_file)
1206 fprintf (dump_file, " adding wild read for (clobber (mem:BLK (scratch)))\n");
1207 add_wild_read (bb_info);
1208 insn_info->cannot_delete = true;
1209 }
1210 else if (!store_is_unused)
1211 {
1212 /* If the set or clobber is unused, then it does not affect our
1213 ability to get rid of the entire insn. */
1214 insn_info->cannot_delete = true;
1215 clear_rhs_from_active_local_stores ();
1216 }
1217 return 0;
1218 }
1219
1220 /* We can still process a volatile mem, we just cannot delete it. */
1221 if (MEM_VOLATILE_P (mem))
1222 insn_info->cannot_delete = true;
1223
1224 if (!canon_address (mem, &spill_alias_set, &group_id, &offset, &base))
1225 {
1226 clear_rhs_from_active_local_stores ();
1227 return 0;
1228 }
1229
1230 width = GET_MODE_SIZE (GET_MODE (mem));
1231
1232 if (spill_alias_set)
1233 {
1234 bitmap store1 = clear_alias_group->store1_p;
1235 bitmap store2 = clear_alias_group->store2_p;
1236
1237 if (bitmap_bit_p (store1, spill_alias_set))
1238 bitmap_set_bit (store2, spill_alias_set);
1239 else
1240 bitmap_set_bit (store1, spill_alias_set);
1241
1242 if (clear_alias_group->offset_map_size_p < spill_alias_set)
1243 clear_alias_group->offset_map_size_p = spill_alias_set;
1244
1245 store_info = pool_alloc (rtx_store_info_pool);
1246
1247 if (dump_file)
1248 fprintf (dump_file, " processing spill store %d(%s)\n",
1249 (int) spill_alias_set, GET_MODE_NAME (GET_MODE (mem)));
1250 }
1251 else if (group_id >= 0)
1252 {
1253 /* In the restrictive case where the base is a constant or the
1254 frame pointer we can do global analysis. */
1255
1256 group_info_t group
1257 = VEC_index (group_info_t, rtx_group_vec, group_id);
1258
1259 store_info = pool_alloc (rtx_store_info_pool);
1260 set_usage_bits (group, offset, width);
1261
1262 if (dump_file)
1263 fprintf (dump_file, " processing const base store gid=%d[%d..%d)\n",
1264 group_id, (int)offset, (int)(offset+width));
1265 }
1266 else
1267 {
1268 rtx base_term = find_base_term (XEXP (mem, 0));
1269 if (!base_term
1270 || (GET_CODE (base_term) == ADDRESS
1271 && GET_MODE (base_term) == Pmode
1272 && XEXP (base_term, 0) == stack_pointer_rtx))
1273 insn_info->stack_pointer_based = true;
1274 insn_info->contains_cselib_groups = true;
1275
1276 store_info = pool_alloc (cse_store_info_pool);
1277 group_id = -1;
1278
1279 if (dump_file)
1280 fprintf (dump_file, " processing cselib store [%d..%d)\n",
1281 (int)offset, (int)(offset+width));
1282 }
1283
1284 /* Check to see if this store causes some other stores to be
1285 dead. */
1286 ptr = active_local_stores;
1287 last = NULL;
1288
1289 while (ptr)
1290 {
1291 insn_info_t next = ptr->next_local_store;
1292 store_info_t s_info = ptr->store_rec;
1293 bool delete = true;
1294
1295 /* Skip the clobbers. We delete the active insn if this insn
1296 shadows the set. To have been put on the active list, it
1297 has exactly one set. */
1298 while (!s_info->is_set)
1299 s_info = s_info->next;
1300
1301 if (s_info->alias_set != spill_alias_set)
1302 delete = false;
1303 else if (s_info->alias_set)
1304 {
1305 struct clear_alias_mode_holder *entry
1306 = clear_alias_set_lookup (s_info->alias_set);
1307 /* Generally, spills cannot be processed if any of the
1308 references to the slot have a different mode. But if
1309 we are in the same block and mode is exactly the same
1310 between this store and one before in the same block,
1311 we can still delete it. */
1312 if ((GET_MODE (mem) == GET_MODE (s_info->mem))
1313 && (GET_MODE (mem) == entry->mode))
1314 {
1315 delete = true;
1316 s_info->positions_needed = 0;
1317 }
1318 if (dump_file)
1319 fprintf (dump_file, " trying spill store in insn=%d alias_set=%d\n",
1320 INSN_UID (ptr->insn), (int) s_info->alias_set);
1321 }
1322 else if ((s_info->group_id == group_id)
1323 && (s_info->cse_base == base))
1324 {
1325 HOST_WIDE_INT i;
1326 if (dump_file)
1327 fprintf (dump_file, " trying store in insn=%d gid=%d[%d..%d)\n",
1328 INSN_UID (ptr->insn), s_info->group_id,
1329 (int)s_info->begin, (int)s_info->end);
1330 for (i = offset; i < offset+width; i++)
1331 if (i >= s_info->begin && i < s_info->end)
1332 s_info->positions_needed &= ~(1L << (i - s_info->begin));
1333 }
1334 else if (s_info->rhs)
1335 /* Need to see if it is possible for this store to overwrite
1336 the value of store_info. If it is, set the rhs to NULL to
1337 keep it from being used to remove a load. */
1338 {
1339 if (canon_true_dependence (s_info->mem,
1340 GET_MODE (s_info->mem),
1341 s_info->mem_addr,
1342 mem, rtx_varies_p))
1343 s_info->rhs = NULL;
1344 }
1345
1346 /* An insn can be deleted if every position of every one of
1347 its s_infos is zero. */
1348 if (s_info->positions_needed != 0)
1349 delete = false;
1350
1351 if (delete)
1352 {
1353 insn_info_t insn_to_delete = ptr;
1354
1355 if (last)
1356 last->next_local_store = ptr->next_local_store;
1357 else
1358 active_local_stores = ptr->next_local_store;
1359
1360 delete_dead_store_insn (insn_to_delete);
1361 }
1362 else
1363 last = ptr;
1364
1365 ptr = next;
1366 }
1367
1368 gcc_assert ((unsigned) width < sizeof (store_info->positions_needed) * CHAR_BIT);
1369
1370 /* Finish filling in the store_info. */
1371 store_info->next = insn_info->store_rec;
1372 insn_info->store_rec = store_info;
1373 store_info->mem = canon_rtx (mem);
1374 store_info->alias_set = spill_alias_set;
1375 store_info->mem_addr = get_addr (XEXP (mem, 0));
1376 store_info->cse_base = base;
1377 store_info->positions_needed = (1L << width) - 1;
1378 store_info->group_id = group_id;
1379 store_info->begin = offset;
1380 store_info->end = offset + width;
1381 store_info->is_set = GET_CODE (body) == SET;
1382
1383 if (store_info->is_set
1384 /* No place to keep the value after ra. */
1385 && !reload_completed
1386 /* The careful reviewer may wish to comment on my checking that the
1387 rhs of a store is always a reg. */
1388 && REG_P (SET_SRC (body))
1389 /* Sometimes the store and reload are used for truncation and
1390 rounding. */
1391 && !(FLOAT_MODE_P (GET_MODE (mem)) && (flag_float_store)))
1392 store_info->rhs = SET_SRC (body);
1393 else
1394 store_info->rhs = NULL;
1395
1396 /* If this is a clobber, we return 0. We will only be able to
1397 delete this insn if there is only one USED store, but we
1398 can use the clobber to delete other stores earlier. */
1399 return store_info->is_set ? 1 : 0;
1400 }
1401
1402
1403 static void
1404 dump_insn_info (const char * start, insn_info_t insn_info)
1405 {
1406 fprintf (dump_file, "%s insn=%d %s\n", start,
1407 INSN_UID (insn_info->insn),
1408 insn_info->store_rec ? "has store" : "naked");
1409 }
1410
1411
1412 /* If the modes are different and the value's source and target do not
1413 line up, we need to extract the value from the lower part of the rhs of
1414 the store, shift it, and then put it into a form that can be shoved
1415 into the read_insn. This function generates a right SHIFT of a
1416 value that is at least ACCESS_SIZE bytes wide of READ_MODE. The
1417 shift sequence is returned or NULL if we failed to find a
1418 shift. */
1419
1420 static rtx
1421 find_shift_sequence (rtx read_reg,
1422 int access_size,
1423 store_info_t store_info,
1424 read_info_t read_info,
1425 int shift)
1426 {
1427 enum machine_mode store_mode = GET_MODE (store_info->mem);
1428 enum machine_mode read_mode = GET_MODE (read_info->mem);
1429 rtx chosen_seq = NULL;
1430
1431 /* Some machines like the x86 have shift insns for each size of
1432 operand. Other machines like the ppc or the ia-64 may only have
1433 shift insns that shift values within 32 or 64 bit registers.
1434 This loop tries to find the smallest shift insn that will right
1435 justify the value we want to read but is available in one insn on
1436 the machine. */
1437
1438 for (; access_size <= UNITS_PER_WORD; access_size *= 2)
1439 {
1440 rtx target, new_reg, shift_seq, insn;
1441 enum machine_mode new_mode;
1442 int cost;
1443
1444 /* Try a wider mode if truncating the store mode to ACCESS_SIZE
1445 bytes requires a real instruction. */
1446 if (access_size < GET_MODE_SIZE (store_mode)
1447 && !TRULY_NOOP_TRUNCATION (access_size * BITS_PER_UNIT,
1448 GET_MODE_BITSIZE (store_mode)))
1449 continue;
1450
1451 new_mode = smallest_mode_for_size (access_size * BITS_PER_UNIT,
1452 MODE_INT);
1453 new_reg = gen_reg_rtx (new_mode);
1454
1455 start_sequence ();
1456
1457 /* In theory we could also check for an ashr. Ian Taylor knows
1458 of one dsp where the cost of these two was not the same. But
1459 this really is a rare case anyway. */
1460 target = expand_binop (new_mode, lshr_optab, new_reg,
1461 GEN_INT (shift), new_reg, 1, OPTAB_DIRECT);
1462
1463 shift_seq = get_insns ();
1464 end_sequence ();
1465
1466 if (target != new_reg || shift_seq == NULL)
1467 continue;
1468
1469 cost = 0;
1470 for (insn = shift_seq; insn != NULL_RTX; insn = NEXT_INSN (insn))
1471 if (INSN_P (insn))
1472 cost += insn_rtx_cost (PATTERN (insn));
1473
1474 /* The computation up to here is essentially independent
1475 of the arguments and could be precomputed. It may
1476 not be worth doing so. We could precompute if
1477 worthwhile or at least cache the results. The result
1478 technically depends on both SHIFT and ACCESS_SIZE,
1479 but in practice the answer will depend only on ACCESS_SIZE. */
1480
1481 if (cost > COSTS_N_INSNS (1))
1482 continue;
1483
1484 /* We found an acceptable shift. Generate a move to
1485 take the value from the store and put it into the
1486 shift pseudo, then shift it, then generate another
1487 move to put it into the target of the read. */
1488 start_sequence ();
1489 emit_move_insn (new_reg, gen_lowpart (new_mode, store_info->rhs));
1490 emit_insn (shift_seq);
1491 convert_move (read_reg, new_reg, 1);
1492
1493 if (dump_file)
1494 {
1495 fprintf (dump_file, " -- adding extract insn r%d:%s = r%d:%s\n",
1496 REGNO (new_reg), GET_MODE_NAME (new_mode),
1497 REGNO (store_info->rhs), GET_MODE_NAME (store_mode));
1498
1499 fprintf (dump_file, " -- with shift of r%d by %d\n",
1500 REGNO(new_reg), shift);
1501 fprintf (dump_file, " -- and second extract insn r%d:%s = r%d:%s\n",
1502 REGNO (read_reg), GET_MODE_NAME (read_mode),
1503 REGNO (new_reg), GET_MODE_NAME (new_mode));
1504 }
1505
1506 /* Get the three insn sequence and return it. */
1507 chosen_seq = get_insns ();
1508 end_sequence ();
1509 break;
1510 }
1511
1512 return chosen_seq;
1513 }
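/* An illustrative sketch (hypothetical helper, not part of the pass)
   of how replace_read below derives the shift handed to
   find_shift_sequence.  For a DImode store of bytes [0,8) followed by
   an SImode read of bytes [4,8), this gives 32 bits on a little-endian
   target and 0 on a big-endian one.  */
#if 0
static int
sketch_read_shift (int store_begin, int store_end,
		   int read_begin, int read_end, int bytes_big_endian)
{
  int shift = bytes_big_endian
	      ? store_end - read_end
	      : read_begin - store_begin;
  return shift * 8;	/* I.e. shift * BITS_PER_UNIT.  */
}
#endif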
1514
1515
1516 /* Take a sequence of:
1517 A <- r1
1518 ...
1519 ... <- A
1520
1521 and change it into
1522 r2 <- r1
1523 A <- r1
1524 ...
1525 ... <- r2
1526
1527 or
1528
1529 r3 <- extract (r1)
1530 r3 <- r3 >> shift
1531 r2 <- extract (r3)
1532 ... <- r2
1533
1534 or
1535
1536 r2 <- extract (r1)
1537 ... <- r2
1538
1539 Depending on the alignment and the mode of the store and
1540 subsequent load.
1541
1542
1543 The STORE_INFO and STORE_INSN are for the store and READ_INFO
1544 and READ_INSN are for the read. Return true if the replacement
1545 went ok. */
1546
1547 static bool
1548 replace_read (store_info_t store_info, insn_info_t store_insn,
1549 read_info_t read_info, insn_info_t read_insn, rtx *loc)
1550 {
1551 enum machine_mode store_mode = GET_MODE (store_info->mem);
1552 enum machine_mode read_mode = GET_MODE (read_info->mem);
1553 int shift;
1554 int access_size; /* In bytes. */
1555 rtx read_reg = gen_reg_rtx (read_mode);
1556 rtx shift_seq = NULL;
1557
1558 if (!dbg_cnt (dse))
1559 return false;
1560
1561 if (GET_MODE_CLASS (read_mode) != MODE_INT
1562 || GET_MODE_CLASS (store_mode) != MODE_INT)
1563 return false;
1564
1565 /* To get here the read is within the boundaries of the write so
1566 shift will never be negative. Start out with the shift being in
1567 bytes. */
1568 if (BYTES_BIG_ENDIAN)
1569 shift = store_info->end - read_info->end;
1570 else
1571 shift = read_info->begin - store_info->begin;
1572
1573 access_size = shift + GET_MODE_SIZE (read_mode);
1574
1575 /* From now on it is bits. */
1576 shift *= BITS_PER_UNIT;
1577
1578 /* We need to keep this in perspective. We are replacing a read
1579 with a sequence of insns, but the read will almost certainly be
1580 in cache, so it is not going to be an expensive one. Thus, we
1581 are not willing to do a multi insn shift or worse a subroutine
1582 call to get rid of the read. */
1583 if (shift)
1584 {
1585 if (access_size > UNITS_PER_WORD)
1586 return false;
1587
1588 shift_seq = find_shift_sequence (read_reg, access_size, store_info,
1589 read_info, shift);
1590 if (!shift_seq)
1591 return false;
1592 }
1593
1594 if (dump_file)
1595 fprintf (dump_file, "replacing load at %d from store at %d\n",
1596 INSN_UID (read_insn->insn), INSN_UID (store_insn->insn));
1597
1598 if (validate_change (read_insn->insn, loc, read_reg, 0))
1599 {
1600 rtx insns;
1601 deferred_change_t deferred_change = pool_alloc (deferred_change_pool);
1602
1603 if (read_mode == store_mode)
1604 {
1605 start_sequence ();
1606
1607 /* The modes are the same and everything lines up. Just
1608 generate a simple move. */
1609 emit_move_insn (read_reg, store_info->rhs);
1610 if (dump_file)
1611 fprintf (dump_file, " -- adding move insn r%d = r%d\n",
1612 REGNO (read_reg), REGNO (store_info->rhs));
1613 insns = get_insns ();
1614 end_sequence ();
1615 }
1616 else if (shift)
1617 insns = shift_seq;
1618 else
1619 {
1620 /* The modes are different but the lsbs are in the same
1621 place; we need to extract the value from the rhs of the
1622 store and convert it to the read mode. */
1623 start_sequence ();
1624 convert_move (read_reg, store_info->rhs, 1);
1625
1626 if (dump_file)
1627 fprintf (dump_file, " -- adding extract insn r%d:%s = r%d:%s\n",
1628 REGNO (read_reg), GET_MODE_NAME (read_mode),
1629 REGNO (store_info->rhs), GET_MODE_NAME (store_mode));
1630 insns = get_insns ();
1631 end_sequence ();
1632 }
1633
1634 /* Insert this right before the store insn where it will be safe
1635 from later insns that might change it before the read. */
1636 emit_insn_before (insns, store_insn->insn);
1637
1638 /* And now for the kludge part: cselib croaks if you just
1639 return at this point. There are two reasons for this:
1640
1641 1) Cselib has an idea of how many pseudos there are and
1642 that does not include the new ones we just added.
1643
1644 2) Cselib does not know about the move insn we added
1645 above the store_info, and there is no way to tell it
1646 about it, because it has "moved on".
1647
1648 Problem (1) is fixable with a certain amount of engineering.
1649 Problem (2) requires starting the bb from scratch. This
1650 could be expensive.
1651
1652 So we are just going to have to lie. The move/extraction
1653 insns are not really an issue, cselib did not see them. But
1654 the use of the new pseudo read_insn is a real problem because
1655 cselib has not scanned this insn. The way that we solve this
1656 problem is that we are just going to put the mem back for now
1657 and when we are finished with the block, we undo this. We
1658 keep a table of mems to get rid of. At the end of the basic
1659 block we can put them back. */
1660
1661 *loc = read_info->mem;
1662 deferred_change->next = deferred_change_list;
1663 deferred_change_list = deferred_change;
1664 deferred_change->loc = loc;
1665 deferred_change->reg = read_reg;
1666
1667 /* Get rid of the read_info, from the point of view of the
1668 rest of dse, play like this read never happened. */
1669 read_insn->read_rec = read_info->next;
1670 pool_free (read_info_pool, read_info);
1671 return true;
1672 }
1673 else
1674 {
1675 if (dump_file)
1676 fprintf (dump_file, " -- validation failure\n");
1677 return false;
1678 }
1679 }
1680
1681 /* A for_each_rtx callback in which DATA is the bb_info. Check to see
1682 if LOC is a mem and, if it is, look at the address and kill any
1683 appropriate stores that may be active. */
1684
1685 static int
1686 check_mem_read_rtx (rtx *loc, void *data)
1687 {
1688 rtx mem = *loc;
1689 bb_info_t bb_info;
1690 insn_info_t insn_info;
1691 HOST_WIDE_INT offset = 0;
1692 HOST_WIDE_INT width = 0;
1693 alias_set_type spill_alias_set = 0;
1694 cselib_val *base = NULL;
1695 int group_id;
1696 read_info_t read_info;
1697
1698 if (!mem || !MEM_P (mem))
1699 return 0;
1700
1701 bb_info = (bb_info_t) data;
1702 insn_info = bb_info->last_insn;
1703
1704 if ((MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
1705 || (MEM_VOLATILE_P (mem)))
1706 {
1707 if (dump_file)
1708 fprintf (dump_file, " adding wild read, volatile or barrier.\n");
1709 add_wild_read (bb_info);
1710 insn_info->cannot_delete = true;
1711 return 0;
1712 }
1713
1714 /* If it is reading readonly mem, then there can be no conflict with
1715 another write. */
1716 if (MEM_READONLY_P (mem))
1717 return 0;
1718
1719 if (!canon_address (mem, &spill_alias_set, &group_id, &offset, &base))
1720 {
1721 if (dump_file)
1722 fprintf (dump_file, " adding wild read, canon_address failure.\n");
1723 add_wild_read (bb_info);
1724 return 0;
1725 }
1726
1727 if (GET_MODE (mem) == BLKmode)
1728 width = -1;
1729 else
1730 width = GET_MODE_SIZE (GET_MODE (mem));
1731
1732 read_info = pool_alloc (read_info_pool);
1733 read_info->group_id = group_id;
1734 read_info->mem = mem;
1735 read_info->alias_set = spill_alias_set;
1736 read_info->begin = offset;
1737 read_info->end = offset + width;
1738 read_info->next = insn_info->read_rec;
1739 insn_info->read_rec = read_info;
1740
1741 /* We ignore the clobbers in store_info. This is mildly aggressive,
1742 but there really should not be a clobber followed by a read. */
1743
1744 if (spill_alias_set)
1745 {
1746 insn_info_t i_ptr = active_local_stores;
1747 insn_info_t last = NULL;
1748
1749 if (dump_file)
1750 fprintf (dump_file, " processing spill load %d\n",
1751 (int) spill_alias_set);
1752
1753 while (i_ptr)
1754 {
1755 store_info_t store_info = i_ptr->store_rec;
1756
1757 /* Skip the clobbers. */
1758 while (!store_info->is_set)
1759 store_info = store_info->next;
1760
1761 if (store_info->alias_set == spill_alias_set)
1762 {
1763 if (dump_file)
1764 dump_insn_info ("removing from active", i_ptr);
1765
1766 if (last)
1767 last->next_local_store = i_ptr->next_local_store;
1768 else
1769 active_local_stores = i_ptr->next_local_store;
1770 }
1771 else
1772 last = i_ptr;
1773 i_ptr = i_ptr->next_local_store;
1774 }
1775 }
1776 else if (group_id >= 0)
1777 {
1778 /* This is the restricted case where the base is a constant or
1779 the frame pointer and offset is a constant. */
1780 insn_info_t i_ptr = active_local_stores;
1781 insn_info_t last = NULL;
1782
1783 if (dump_file)
1784 {
1785 if (width == -1)
1786 fprintf (dump_file, " processing const load gid=%d[BLK]\n",
1787 group_id);
1788 else
1789 fprintf (dump_file, " processing const load gid=%d[%d..%d)\n",
1790 group_id, (int)offset, (int)(offset+width));
1791 }
1792
1793 while (i_ptr)
1794 {
1795 bool remove = false;
1796 store_info_t store_info = i_ptr->store_rec;
1797
1798 /* Skip the clobbers. */
1799 while (!store_info->is_set)
1800 store_info = store_info->next;
1801
1802 /* There are three cases here. */
1803 if (store_info->group_id < 0)
1804 /* We have a cselib store followed by a read from a
1805 const base. */
1806 remove
1807 = canon_true_dependence (store_info->mem,
1808 GET_MODE (store_info->mem),
1809 store_info->mem_addr,
1810 mem, rtx_varies_p);
1811
1812 else if (group_id == store_info->group_id)
1813 {
1814 /* This is a block mode load. We may get lucky and
1815 canon_true_dependence may save the day. */
1816 if (width == -1)
1817 remove
1818 = canon_true_dependence (store_info->mem,
1819 GET_MODE (store_info->mem),
1820 store_info->mem_addr,
1821 mem, rtx_varies_p);
1822
1823 /* If this read is just reading back something that we just
1824 stored, rewrite the read. */
1825 else
1826 {
1827 if (store_info->rhs
1828 && (offset >= store_info->begin)
1829 && (offset + width <= store_info->end))
1830 {
1831 int mask = ((1L << width) - 1) << (offset - store_info->begin);
1832
1833 if ((store_info->positions_needed & mask) == mask
1834 && replace_read (store_info, i_ptr,
1835 read_info, insn_info, loc))
1836 return 0;
1837 }
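	      /* A worked example (a sketch): if the store covered
		 [begin, end) = [0, 8) and the read has offset 2 and
		 width 2, then mask = ((1L << 2) - 1) << (2 - 0) = 0xc;
		 the read is rewritten only if bits 2 and 3 of
		 positions_needed are both still set.  */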
1838 /* The bases are the same, just see if the offsets
1839 overlap. */
1840 if ((offset < store_info->end)
1841 && (offset + width > store_info->begin))
1842 remove = true;
1843 }
1844 }
1845
1846 /* else
1847 The else case that is missing here is that the
1848 bases are constant but different. There is nothing
1849 to do here because there is no overlap. */
1850
1851 if (remove)
1852 {
1853 if (dump_file)
1854 dump_insn_info ("removing from active", i_ptr);
1855
1856 if (last)
1857 last->next_local_store = i_ptr->next_local_store;
1858 else
1859 active_local_stores = i_ptr->next_local_store;
1860 }
1861 else
1862 last = i_ptr;
1863 i_ptr = i_ptr->next_local_store;
1864 }
1865 }
1866 else
1867 {
1868 insn_info_t i_ptr = active_local_stores;
1869 insn_info_t last = NULL;
1870 if (dump_file)
1871 {
1872 fprintf (dump_file, " processing cselib load mem:");
1873 print_inline_rtx (dump_file, mem, 0);
1874 fprintf (dump_file, "\n");
1875 }
1876
1877 while (i_ptr)
1878 {
1879 bool remove = false;
1880 store_info_t store_info = i_ptr->store_rec;
1881
1882 if (dump_file)
1883 fprintf (dump_file, " processing cselib load against insn %d\n",
1884 INSN_UID (i_ptr->insn));
1885
1886 /* Skip the clobbers. */
1887 while (!store_info->is_set)
1888 store_info = store_info->next;
1889
1890 /* If this read is just reading back something that we just
1891 stored, rewrite the read. */
1892 if (store_info->rhs
1893 && store_info->group_id == -1
1894 && store_info->cse_base == base
1895 && (offset >= store_info->begin)
1896 && (offset + width <= store_info->end))
1897 {
1898 int mask = ((1L << width) - 1) << (offset - store_info->begin);
1899
1900 if ((store_info->positions_needed & mask) == mask
1901 && replace_read (store_info, i_ptr,
1902 read_info, insn_info, loc))
1903 return 0;
1904 }
1905
1906 if (!store_info->alias_set)
1907 remove = canon_true_dependence (store_info->mem,
1908 GET_MODE (store_info->mem),
1909 store_info->mem_addr,
1910 mem, rtx_varies_p);
1911
1912 if (remove)
1913 {
1914 if (dump_file)
1915 dump_insn_info ("removing from active", i_ptr);
1916
1917 if (last)
1918 last->next_local_store = i_ptr->next_local_store;
1919 else
1920 active_local_stores = i_ptr->next_local_store;
1921 }
1922 else
1923 last = i_ptr;
1924 i_ptr = i_ptr->next_local_store;
1925 }
1926 }
1927 return 0;
1928 }
1929
1930 /* A note_uses callback in which DATA is the bb_info that gets
1931 passed on to check_mem_read_rtx. Apply check_mem_read_rtx,
1932 via for_each_rtx, to every part of *LOC. */
1933
1934 static void
1935 check_mem_read_use (rtx *loc, void *data)
1936 {
1937 for_each_rtx (loc, check_mem_read_rtx, data);
1938 }
1939
1940 /* Apply record_store to all candidate stores in INSN. Mark INSN
1941 as not deletable if some part of it is not a candidate store
1942 and assigns to a non-register target. */
1943
1944 static void
1945 scan_insn (bb_info_t bb_info, rtx insn)
1946 {
1947 rtx body;
1948 insn_info_t insn_info = pool_alloc (insn_info_pool);
1949 int mems_found = 0;
1950 memset (insn_info, 0, sizeof (struct insn_info));
1951
1952 if (dump_file)
1953 fprintf (dump_file, "\n**scanning insn=%d\n",
1954 INSN_UID (insn));
1955
1956 insn_info->prev_insn = bb_info->last_insn;
1957 insn_info->insn = insn;
1958 bb_info->last_insn = insn_info;
1959
1960
1961 /* Cselib clears the table for this case, so we have to essentially
1962 do the same. */
1963 if (NONJUMP_INSN_P (insn)
1964 && GET_CODE (PATTERN (insn)) == ASM_OPERANDS
1965 && MEM_VOLATILE_P (PATTERN (insn)))
1966 {
1967 add_wild_read (bb_info);
1968 insn_info->cannot_delete = true;
1969 return;
1970 }
1971
1972 /* Look at all of the uses in the insn. */
1973 note_uses (&PATTERN (insn), check_mem_read_use, bb_info);
1974
1975 if (CALL_P (insn))
1976 {
1977 insn_info->cannot_delete = true;
1978
1979 /* Const functions cannot do anything bad, i.e. read memory;
1980 however, they can read their parameters, which may have
1981 been pushed onto the stack. */
1982 if (CONST_OR_PURE_CALL_P (insn) && !pure_call_p (insn))
1983 {
1984 insn_info_t i_ptr = active_local_stores;
1985 insn_info_t last = NULL;
1986
1987 if (dump_file)
1988 fprintf (dump_file, "const call %d\n", INSN_UID (insn));
1989
1990 /* See the head comment of the frame_read field. */
1991 if (reload_completed)
1992 insn_info->frame_read = true;
1993
1994 /* Loop over the active stores and remove those which are
1995 killed by the const function call. */
1996 while (i_ptr)
1997 {
1998 bool remove_store = false;
1999
2000 /* The stack pointer based stores are always killed. */
2001 if (i_ptr->stack_pointer_based)
2002 remove_store = true;
2003
2004 /* If the frame is read, the frame related stores are killed. */
2005 else if (insn_info->frame_read)
2006 {
2007 store_info_t store_info = i_ptr->store_rec;
2008
2009 /* Skip the clobbers. */
2010 while (!store_info->is_set)
2011 store_info = store_info->next;
2012
2013 if (store_info->group_id >= 0
2014 && VEC_index (group_info_t, rtx_group_vec,
2015 store_info->group_id)->frame_related)
2016 remove_store = true;
2017 }
2018
2019 if (remove_store)
2020 {
2021 if (dump_file)
2022 dump_insn_info ("removing from active", i_ptr);
2023
2024 if (last)
2025 last->next_local_store = i_ptr->next_local_store;
2026 else
2027 active_local_stores = i_ptr->next_local_store;
2028 }
2029 else
2030 last = i_ptr;
2031
2032 i_ptr = i_ptr->next_local_store;
2033 }
2034 }
2035
2036 else
2037 /* Every other call, including pure functions, may read memory. */
2038 add_wild_read (bb_info);
2039
2040 return;
2041 }
2042
2043 /* Assuming that there are sets in these insns, we cannot delete
2044 them. */
2045 if ((GET_CODE (PATTERN (insn)) == CLOBBER)
2046 || volatile_refs_p (PATTERN (insn))
2047 || (flag_non_call_exceptions && may_trap_p (PATTERN (insn)))
2048 || (RTX_FRAME_RELATED_P (insn))
2049 || find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX))
2050 insn_info->cannot_delete = true;
2051
2052 body = PATTERN (insn);
2053 if (GET_CODE (body) == PARALLEL)
2054 {
2055 int i;
2056 for (i = 0; i < XVECLEN (body, 0); i++)
2057 mems_found += record_store (XVECEXP (body, 0, i), bb_info);
2058 }
2059 else
2060 mems_found += record_store (body, bb_info);
2061
2062 if (dump_file)
2063 fprintf (dump_file, "mems_found = %d, cannot_delete = %s\n",
2064 mems_found, insn_info->cannot_delete ? "true" : "false");
2065
2066 /* If the insn contains exactly one set of a mem and has not been
2067 marked cannot delete, add it to active_local_stores so that it
2068 can be locally deleted if found dead. Otherwise mark it as
2069 cannot delete. This simplifies the processing later. */
2070 if (mems_found == 1 && !insn_info->cannot_delete)
2071 {
2072 insn_info->next_local_store = active_local_stores;
2073 active_local_stores = insn_info;
2074 }
2075 else
2076 insn_info->cannot_delete = true;
2077 }
2078
2079
2080 /* Remove BASE from the set of active_local_stores. This is a
2081 callback from cselib that is used to get rid of the stores in
2082 active_local_stores. */
2083
2084 static void
2085 remove_useless_values (cselib_val *base)
2086 {
2087 insn_info_t insn_info = active_local_stores;
2088 insn_info_t last = NULL;
2089
2090 while (insn_info)
2091 {
2092 store_info_t store_info = insn_info->store_rec;
2093 bool delete = false;
2094
2095 /* If ANY of the store_infos match the cselib group that is
2096 being deleted, then the insn cannot be deleted. */
2097 while (store_info)
2098 {
2099 if ((store_info->group_id == -1)
2100 && (store_info->cse_base == base))
2101 {
2102 delete = true;
2103 break;
2104 }
2105 store_info = store_info->next;
2106 }
2107
2108 if (delete)
2109 {
2110 if (last)
2111 last->next_local_store = insn_info->next_local_store;
2112 else
2113 active_local_stores = insn_info->next_local_store;
2114 free_store_info (insn_info);
2115 }
2116 else
2117 last = insn_info;
2118
2119 insn_info = insn_info->next_local_store;
2120 }
2121 }
2122
2123
2124 /* Do all of step 1. */
2125
2126 static void
2127 dse_step1 (void)
2128 {
2129 basic_block bb;
2130
2131 cselib_init (false);
2132 all_blocks = BITMAP_ALLOC (NULL);
2133 bitmap_set_bit (all_blocks, ENTRY_BLOCK);
2134 bitmap_set_bit (all_blocks, EXIT_BLOCK);
2135
2136 FOR_ALL_BB (bb)
2137 {
2138 insn_info_t ptr;
2139 bb_info_t bb_info = pool_alloc (bb_info_pool);
2140
2141 memset (bb_info, 0, sizeof (struct bb_info));
2142 bitmap_set_bit (all_blocks, bb->index);
2143
2144 bb_table[bb->index] = bb_info;
2145 cselib_discard_hook = remove_useless_values;
2146
2147 if (bb->index >= NUM_FIXED_BLOCKS)
2148 {
2149 rtx insn;
2150
2151 cse_store_info_pool
2152 = create_alloc_pool ("cse_store_info_pool",
2153 sizeof (struct store_info), 100);
2154 active_local_stores = NULL;
2155 cselib_clear_table ();
2156
2157 /* Scan the insns. */
2158 FOR_BB_INSNS (bb, insn)
2159 {
2160 if (INSN_P (insn))
2161 scan_insn (bb_info, insn);
2162 cselib_process_insn (insn);
2163 }
2164
2165 /* This is something of a hack, because the global algorithm
2166 is supposed to take care of the case where stores go dead
2167 at the end of the function. However, the global
2168 algorithm must take a more conservative view of block
2169 mode reads than the local algorithm does. So to handle the case
2170 where you have a store to the frame followed by a non
2171 overlapping block mode read, we look at the active local
2172 stores at the end of the function and delete all of the
2173 frame and spill based ones. */
2174 if (stores_off_frame_dead_at_return
2175 && (EDGE_COUNT (bb->succs) == 0
2176 || (single_succ_p (bb)
2177 && single_succ (bb) == EXIT_BLOCK_PTR
2178 && ! current_function_calls_eh_return)))
2179 {
2180 insn_info_t i_ptr = active_local_stores;
2181 while (i_ptr)
2182 {
2183 store_info_t store_info = i_ptr->store_rec;
2184
2185 /* Skip the clobbers. */
2186 while (!store_info->is_set)
2187 store_info = store_info->next;
2188 if (store_info->alias_set)
2189 delete_dead_store_insn (i_ptr);
2190 else
2191 if (store_info->group_id >= 0)
2192 {
2193 group_info_t group
2194 = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
2195 if (group->frame_related)
2196 delete_dead_store_insn (i_ptr);
2197 }
2198
2199 i_ptr = i_ptr->next_local_store;
2200 }
2201 }
2202
2203 /* Get rid of the loads that were discovered in
2204 replace_read. Cselib is finished with this block. */
2205 while (deferred_change_list)
2206 {
2207 deferred_change_t next = deferred_change_list->next;
2208
2209 /* There is no reason to validate this change. That was
2210 done earlier. */
2211 *deferred_change_list->loc = deferred_change_list->reg;
2212 pool_free (deferred_change_pool, deferred_change_list);
2213 deferred_change_list = next;
2214 }
2215
2216 /* Get rid of all of the cselib based store_infos in this
2217 block and mark the containing insns as not being
2218 deletable. */
2219 ptr = bb_info->last_insn;
2220 while (ptr)
2221 {
2222 if (ptr->contains_cselib_groups)
2223 free_store_info (ptr);
2224 ptr = ptr->prev_insn;
2225 }
2226
2227 free_alloc_pool (cse_store_info_pool);
2228 }
2229 }
2230
2231 cselib_finish ();
2232 htab_empty (rtx_group_table);
2233 }
2234
2235 \f
2236 /*----------------------------------------------------------------------------
2237 Second step.
2238
2239 Assign each byte position in the stores that we are going to
2240 analyze globally to a position in the bitmaps. Returns true if
2241 there are any bit positions assigned.
2242 ----------------------------------------------------------------------------*/
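/* For example (a sketch): if group 0 has store2_p = {4, 5, 6, 7}
   and group 1 has store2_p = {0, 1}, dse_step2_nospill assigns

       group 0: offset_map_p[4..7] = 1, 2, 3, 4
       group 1: offset_map_p[0..1] = 5, 6

   so each tracked byte position gets its own bit in the dataflow
   bitmaps, and 0 is reserved to mean "not tracked".  */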
2243
2244 static void
2245 dse_step2_init (void)
2246 {
2247 unsigned int i;
2248 group_info_t group;
2249
2250 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2251 {
2252 /* For all non stack related bases, we only consider a store to
2253 be deletable if there are two or more stores for that
2254 position. This is because it takes one store to make the
2255 other store redundant. However, for the stores that are
2256 stack related, we consider them if there is only one store
2257 for the position. We do this because the stack related
2258 stores can be deleted if there is no read between them and
2259 the end of the function.
2260
2261 To make this work in the current framework, we take the stack
2262 related bases and add all of the bits from store1 into store2.
2263 This has the effect of making them eligible even if there is
2264 only one store. */
2265
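      /* E.g. (a sketch): a frame related group whose only store is at
	 offset 8 has store1_p = {8} and store2_p = {}; after the
	 bitmap_ior_into calls below, store2_p = {8}, so that single
	 store is still considered by the global algorithm.  */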
2266 if (stores_off_frame_dead_at_return && group->frame_related)
2267 {
2268 bitmap_ior_into (group->store2_n, group->store1_n);
2269 bitmap_ior_into (group->store2_p, group->store1_p);
2270 if (dump_file)
2271 fprintf (dump_file, "group %d is frame related ", i);
2272 }
2273
2274 group->offset_map_size_n++;
2275 group->offset_map_n = XNEWVEC (int, group->offset_map_size_n);
2276 group->offset_map_size_p++;
2277 group->offset_map_p = XNEWVEC (int, group->offset_map_size_p);
2278 group->process_globally = false;
2279 if (dump_file)
2280 {
2281 fprintf (dump_file, "group %d(%d+%d): ", i,
2282 (int)bitmap_count_bits (group->store2_n),
2283 (int)bitmap_count_bits (group->store2_p));
2284 bitmap_print (dump_file, group->store2_n, "n ", " ");
2285 bitmap_print (dump_file, group->store2_p, "p ", "\n");
2286 }
2287 }
2288 }
2289
2290
2291 /* Init the offset tables for the normal case. */
2292
2293 static bool
2294 dse_step2_nospill (void)
2295 {
2296 unsigned int i;
2297 group_info_t group;
2298 /* Position 0 is never assigned, because a 0 in the offset maps
2299 means that the position is unused. */
2300 current_position = 1;
2301
2302 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2303 {
2304 bitmap_iterator bi;
2305 unsigned int j;
2306
2307 if (group == clear_alias_group)
2308 continue;
2309
2310 memset (group->offset_map_n, 0, sizeof(int) * group->offset_map_size_n);
2311 memset (group->offset_map_p, 0, sizeof(int) * group->offset_map_size_p);
2312 bitmap_clear (group->group_kill);
2313
2314 EXECUTE_IF_SET_IN_BITMAP (group->store2_n, 0, j, bi)
2315 {
2316 bitmap_set_bit (group->group_kill, current_position);
2317 group->offset_map_n[j] = current_position++;
2318 group->process_globally = true;
2319 }
2320 EXECUTE_IF_SET_IN_BITMAP (group->store2_p, 0, j, bi)
2321 {
2322 bitmap_set_bit (group->group_kill, current_position);
2323 group->offset_map_p[j] = current_position++;
2324 group->process_globally = true;
2325 }
2326 }
2327 return current_position != 1;
2328 }
2329
2330
2331 /* Init the offset tables for the spill case. */
2332
2333 static bool
2334 dse_step2_spill (void)
2335 {
2336 unsigned int j;
2337 group_info_t group = clear_alias_group;
2338 bitmap_iterator bi;
2339
2340 /* Position 0 is never assigned, because a 0 in the offset maps
2341 means that the position is unused. */
2342 current_position = 1;
2343
2344 if (dump_file)
2345 {
2346 bitmap_print (dump_file, clear_alias_sets,
2347 "clear alias sets ", "\n");
2348 bitmap_print (dump_file, disqualified_clear_alias_sets,
2349 "disqualified clear alias sets ", "\n");
2350 }
2351
2352 memset (group->offset_map_n, 0, sizeof(int) * group->offset_map_size_n);
2353 memset (group->offset_map_p, 0, sizeof(int) * group->offset_map_size_p);
2354 bitmap_clear (group->group_kill);
2355
2356 /* Remove the disqualified positions from the store2_p set. */
2357 bitmap_and_compl_into (group->store2_p, disqualified_clear_alias_sets);
2358
2359 /* We do not need to process the store2_n set because
2360 alias_sets are always positive. */
2361 EXECUTE_IF_SET_IN_BITMAP (group->store2_p, 0, j, bi)
2362 {
2363 bitmap_set_bit (group->group_kill, current_position);
2364 group->offset_map_p[j] = current_position++;
2365 group->process_globally = true;
2366 }
2367
2368 return current_position != 1;
2369 }
2370
2371
2372 \f
2373 /*----------------------------------------------------------------------------
2374 Third step.
2375
2376 Build the bit vectors for the transfer functions.
2377 ----------------------------------------------------------------------------*/
2378
2379
2380 /* Note that this is NOT a general purpose function. Any mem that has
2381 an alias set registered here is expected to be COMPLETELY unaliased:
2382 i.e., its addresses are not, and need not be, examined.
2383
2384 It is known that all references to this address will have this
2385 alias set and there are NO other references to this address in the
2386 function.
2387
2388 Currently the only place that is known to be clean enough to use
2389 this interface is the code that assigns the spill locations.
2390
2391 All of the mems that have alias_sets registered are subjected to a
2392 very powerful form of dse where function calls, volatile reads and
2393 writes, and reads from random locations are not taken into account.
2394
2395 It is also assumed that these locations go dead when the function
2396 returns. This assumption could be relaxed if places were found
2397 where it does not hold.
2398
2399 The MODE is passed in and saved. The mode of each load or store to
2400 a mem with ALIAS_SET is checked against MODE. If the size of that
2401 load or store is different from MODE, processing is halted on this
2402 alias set. For the vast majority of alias sets, all of the loads
2403 and stores will use the same mode. But vectors are treated
2404 differently: the alias set is established for the entire vector,
2405 but reload will insert loads and stores for individual elements and
2406 we do not necessarily have the information to track those separate
2407 elements. So when we see a mode mismatch, we just bail. */
2408
2409
2410 void
2411 dse_record_singleton_alias_set (alias_set_type alias_set,
2412 enum machine_mode mode)
2413 {
2414 struct clear_alias_mode_holder tmp_holder;
2415 struct clear_alias_mode_holder *entry;
2416 void **slot;
2417
2418 /* If we are not going to run dse, we need to return now or there
2419 will be problems with allocating the bitmaps. */
2420 if ((!gate_dse()) || !alias_set)
2421 return;
2422
2423 if (!clear_alias_sets)
2424 {
2425 clear_alias_sets = BITMAP_ALLOC (NULL);
2426 disqualified_clear_alias_sets = BITMAP_ALLOC (NULL);
2427 clear_alias_mode_table = htab_create (11, clear_alias_mode_hash,
2428 clear_alias_mode_eq, NULL);
2429 clear_alias_mode_pool = create_alloc_pool ("clear_alias_mode_pool",
2430 sizeof (struct clear_alias_mode_holder), 100);
2431 }
2432
2433 bitmap_set_bit (clear_alias_sets, alias_set);
2434
2435 tmp_holder.alias_set = alias_set;
2436
2437 slot = htab_find_slot (clear_alias_mode_table, &tmp_holder, INSERT);
2438 gcc_assert (*slot == NULL);
2439
2440 *slot = entry = pool_alloc (clear_alias_mode_pool);
2441 entry->alias_set = alias_set;
2442 entry->mode = mode;
2443 }
2444
2445
2446 /* Remove ALIAS_SET from the sets of stack slots being considered. */
2447
2448 void
2449 dse_invalidate_singleton_alias_set (alias_set_type alias_set)
2450 {
2451 if ((!gate_dse()) || !alias_set)
2452 return;
2453
2454 bitmap_clear_bit (clear_alias_sets, alias_set);
2455 }
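/* A sketch of the expected usage from the code that assigns spill
   slots (the caller shown here is illustrative, not a quote):

       slot = assign_stack_local (mode, size, align);
       set_mem_alias_set (slot, alias_set);
       dse_record_singleton_alias_set (alias_set, mode);

   and, if the singleton guarantee is later broken:

       dse_invalidate_singleton_alias_set (alias_set);  */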
2456
2457
2458 /* Look up the bitmap index for OFFSET in GROUP_INFO. If it is not
2459 there, return 0. */
2460
2461 static int
2462 get_bitmap_index (group_info_t group_info, HOST_WIDE_INT offset)
2463 {
2464 if (offset < 0)
2465 {
2466 HOST_WIDE_INT offset_p = -offset;
2467 if (offset_p >= group_info->offset_map_size_n)
2468 return 0;
2469 return group_info->offset_map_n[offset_p];
2470 }
2471 else
2472 {
2473 if (offset >= group_info->offset_map_size_p)
2474 return 0;
2475 return group_info->offset_map_p[offset];
2476 }
2477 }
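/* For example (a sketch): get_bitmap_index (group, -4) reads
   group->offset_map_n[4] and get_bitmap_index (group, 3) reads
   group->offset_map_p[3]; either returns 0 if dse_step2 never
   assigned that byte a bit position.  */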
2478
2479
2480 /* Process the STORE_INFOs into the bitmaps GEN and KILL. KILL
2481 may be NULL. */
2482
2483 static void
2484 scan_stores_nospill (store_info_t store_info, bitmap gen, bitmap kill)
2485 {
2486 while (store_info)
2487 {
2488 HOST_WIDE_INT i;
2489 group_info_t group_info
2490 = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
2491 if (group_info->process_globally)
2492 for (i = store_info->begin; i < store_info->end; i++)
2493 {
2494 int index = get_bitmap_index (group_info, i);
2495 if (index != 0)
2496 {
2497 bitmap_set_bit (gen, index);
2498 if (kill)
2499 bitmap_clear_bit (kill, index);
2500 }
2501 }
2502 store_info = store_info->next;
2503 }
2504 }
2505
2506
2507 /* Process the STORE_INFOs into the bitmaps GEN and KILL. KILL
2508 may be NULL. */
2509
2510 static void
2511 scan_stores_spill (store_info_t store_info, bitmap gen, bitmap kill)
2512 {
2513 while (store_info)
2514 {
2515 if (store_info->alias_set)
2516 {
2517 int index = get_bitmap_index (clear_alias_group,
2518 store_info->alias_set);
2519 if (index != 0)
2520 {
2521 bitmap_set_bit (gen, index);
2522 if (kill)
2523 bitmap_clear_bit (kill, index);
2524 }
2525 }
2526 store_info = store_info->next;
2527 }
2528 }
2529
2530
2531 /* Process the READ_INFOs into the bitmaps GEN and KILL. KILL
2532 may be NULL. */
2533
2534 static void
2535 scan_reads_nospill (insn_info_t insn_info, bitmap gen, bitmap kill)
2536 {
2537 read_info_t read_info = insn_info->read_rec;
2538 int i;
2539 group_info_t group;
2540
2541 /* If this insn reads the frame, kill all the frame related stores. */
2542 if (insn_info->frame_read)
2543 {
2544 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2545 if (group->process_globally && group->frame_related)
2546 {
2547 if (kill)
2548 bitmap_ior_into (kill, group->group_kill);
2549 bitmap_and_compl_into (gen, group->group_kill);
2550 }
2551 }
2552
2553 while (read_info)
2554 {
2555 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2556 {
2557 if (group->process_globally)
2558 {
2559 if (i == read_info->group_id)
2560 {
2561 if (read_info->begin > read_info->end)
2562 {
2563 /* Begin > end for block mode reads. */
2564 if (kill)
2565 bitmap_ior_into (kill, group->group_kill);
2566 bitmap_and_compl_into (gen, group->group_kill);
2567 }
2568 else
2569 {
2570 /* The groups are the same, just process the
2571 offsets. */
2572 HOST_WIDE_INT j;
2573 for (j = read_info->begin; j < read_info->end; j++)
2574 {
2575 int index = get_bitmap_index (group, j);
2576 if (index != 0)
2577 {
2578 if (kill)
2579 bitmap_set_bit (kill, index);
2580 bitmap_clear_bit (gen, index);
2581 }
2582 }
2583 }
2584 }
2585 else
2586 {
2587 /* The groups are different; if the alias sets
2588 conflict, clear the entire group. We only need
2589 to apply this test if the read_info is a cselib
2590 read. Anything with a constant base cannot alias
2591 something else with a different constant
2592 base. */
2593 if ((read_info->group_id < 0)
2594 && canon_true_dependence (group->base_mem,
2595 QImode,
2596 group->canon_base_mem,
2597 read_info->mem, rtx_varies_p))
2598 {
2599 if (kill)
2600 bitmap_ior_into (kill, group->group_kill);
2601 bitmap_and_compl_into (gen, group->group_kill);
2602 }
2603 }
2604 }
2605 }
2606
2607 read_info = read_info->next;
2608 }
2609 }
2610
2611 /* Process the READ_INFOs into the bitmaps GEN and KILL. KILL
2612 may be NULL. */
2613
2614 static void
2615 scan_reads_spill (read_info_t read_info, bitmap gen, bitmap kill)
2616 {
2617 while (read_info)
2618 {
2619 if (read_info->alias_set)
2620 {
2621 int index = get_bitmap_index (clear_alias_group,
2622 read_info->alias_set);
2623 if (index != 0)
2624 {
2625 if (kill)
2626 bitmap_set_bit (kill, index);
2627 bitmap_clear_bit (gen, index);
2628 }
2629 }
2630
2631 read_info = read_info->next;
2632 }
2633 }
2634
2635
2636 /* Return the insn in BB_INFO before the first wild read or, if there
2637 are no wild reads in the block, the last insn. */
2638
2639 static insn_info_t
2640 find_insn_before_first_wild_read (bb_info_t bb_info)
2641 {
2642 insn_info_t insn_info = bb_info->last_insn;
2643 insn_info_t last_wild_read = NULL;
2644
2645 while (insn_info)
2646 {
2647 if (insn_info->wild_read)
2648 {
2649 last_wild_read = insn_info->prev_insn;
2650 /* Block starts with wild read. */
2651 if (!last_wild_read)
2652 return NULL;
2653 }
2654
2655 insn_info = insn_info->prev_insn;
2656 }
2657
2658 if (last_wild_read)
2659 return last_wild_read;
2660 else
2661 return bb_info->last_insn;
2662 }
2663
2664
2665 /* Scan the insns in BB_INFO starting at PTR and going to the top of
2666 the block in order to build the gen and kill sets for the block.
2667 We start at ptr which may be the last insn in the block or may be
2668 the insn before the first wild read. In the latter case we are able to
2669 skip the rest of the block because it just does not matter:
2670 anything that happens is hidden by the wild read. */
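/* For example (a sketch): in a block i1 i2 i3 i4 where i3 does a
   wild read, find_insn_before_first_wild_read returns i2, so only
   i1 and i2 are scanned; the kill set is also discarded, since
   nothing from below the wild read can be exposed above it.  */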
2671
2672 static void
2673 dse_step3_scan (bool for_spills, basic_block bb)
2674 {
2675 bb_info_t bb_info = bb_table[bb->index];
2676 insn_info_t insn_info;
2677
2678 if (for_spills)
2679 /* There are no wild reads in the spill case. */
2680 insn_info = bb_info->last_insn;
2681 else
2682 insn_info = find_insn_before_first_wild_read (bb_info);
2683
2684 /* In the spill case or in the no_spill case if there is no wild
2685 read in the block, we will need a kill set. */
2686 if (insn_info == bb_info->last_insn)
2687 {
2688 if (bb_info->kill)
2689 bitmap_clear (bb_info->kill);
2690 else
2691 bb_info->kill = BITMAP_ALLOC (NULL);
2692 }
2693 else
2694 if (bb_info->kill)
2695 BITMAP_FREE (bb_info->kill);
2696
2697 while (insn_info)
2698 {
2699 /* There may have been code deleted by the dce pass run before
2700 this phase. */
2701 if (insn_info->insn && INSN_P (insn_info->insn))
2702 {
2703 /* Process the read(s) last. */
2704 if (for_spills)
2705 {
2706 scan_stores_spill (insn_info->store_rec, bb_info->gen, bb_info->kill);
2707 scan_reads_spill (insn_info->read_rec, bb_info->gen, bb_info->kill);
2708 }
2709 else
2710 {
2711 scan_stores_nospill (insn_info->store_rec, bb_info->gen, bb_info->kill);
2712 scan_reads_nospill (insn_info, bb_info->gen, bb_info->kill);
2713 }
2714 }
2715
2716 insn_info = insn_info->prev_insn;
2717 }
2718 }
2719
2720
2721 /* Set the gen set of the exit block, and also any block with no
2722 successors that does not have a wild read. */
2723
2724 static void
2725 dse_step3_exit_block_scan (bb_info_t bb_info)
2726 {
2727 /* The gen set is all 0's for the exit block except for the
2728 frame_pointer_group. */
2729
2730 if (stores_off_frame_dead_at_return)
2731 {
2732 unsigned int i;
2733 group_info_t group;
2734
2735 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2736 {
2737 if (group->process_globally && group->frame_related)
2738 bitmap_ior_into (bb_info->gen, group->group_kill);
2739 }
2740 }
2741 }
2742
2743
2744 /* Find all of the blocks that are not backwards reachable from the
2745 exit block or any block with no successors (BB). These are the
2746 infinite loops or infinite self loops. These blocks will still
2747 have their bits set in UNREACHABLE_BLOCKS. */
2748
2749 static void
2750 mark_reachable_blocks (sbitmap unreachable_blocks, basic_block bb)
2751 {
2752 edge e;
2753 edge_iterator ei;
2754
2755 if (TEST_BIT (unreachable_blocks, bb->index))
2756 {
2757 RESET_BIT (unreachable_blocks, bb->index);
2758 FOR_EACH_EDGE (e, ei, bb->preds)
2759 {
2760 mark_reachable_blocks (unreachable_blocks, e->src);
2761 }
2762 }
2763 }
2764
2765 /* Build the transfer functions for the function. */
2766
2767 static void
2768 dse_step3 (bool for_spills)
2769 {
2770 basic_block bb;
2771 sbitmap unreachable_blocks = sbitmap_alloc (last_basic_block);
2772 sbitmap_iterator sbi;
2773 bitmap all_ones = NULL;
2774 unsigned int i;
2775
2776 sbitmap_ones (unreachable_blocks);
2777
2778 FOR_ALL_BB (bb)
2779 {
2780 bb_info_t bb_info = bb_table[bb->index];
2781 if (bb_info->gen)
2782 bitmap_clear (bb_info->gen);
2783 else
2784 bb_info->gen = BITMAP_ALLOC (NULL);
2785
2786 if (bb->index == ENTRY_BLOCK)
2787 ;
2788 else if (bb->index == EXIT_BLOCK)
2789 dse_step3_exit_block_scan (bb_info);
2790 else
2791 dse_step3_scan (for_spills, bb);
2792 if (EDGE_COUNT (bb->succs) == 0)
2793 mark_reachable_blocks (unreachable_blocks, bb);
2794
2795 /* If this is the second time dataflow is run, delete the old
2796 sets. */
2797 if (bb_info->in)
2798 BITMAP_FREE (bb_info->in);
2799 if (bb_info->out)
2800 BITMAP_FREE (bb_info->out);
2801 }
2802
2803 /* For any block in an infinite loop, we must initialize the out set
2804 to all ones. This could be expensive, but almost never occurs in
2805 practice. However, it is common in regression tests. */
2806 EXECUTE_IF_SET_IN_SBITMAP (unreachable_blocks, 0, i, sbi)
2807 {
2808 if (bitmap_bit_p (all_blocks, i))
2809 {
2810 bb_info_t bb_info = bb_table[i];
2811 if (!all_ones)
2812 {
2813 unsigned int j;
2814 group_info_t group;
2815
2816 all_ones = BITMAP_ALLOC (NULL);
2817 for (j = 0; VEC_iterate (group_info_t, rtx_group_vec, j, group); j++)
2818 bitmap_ior_into (all_ones, group->group_kill);
2819 }
2820 if (!bb_info->out)
2821 {
2822 bb_info->out = BITMAP_ALLOC (NULL);
2823 bitmap_copy (bb_info->out, all_ones);
2824 }
2825 }
2826 }
2827
2828 if (all_ones)
2829 BITMAP_FREE (all_ones);
2830 sbitmap_free (unreachable_blocks);
2831 }
2832
2833
2834 \f
2835 /*----------------------------------------------------------------------------
2836 Fourth step.
2837
2838 Solve the bitvector equations.
2839 ----------------------------------------------------------------------------*/
2840
2841
2842 /* Confluence function for blocks with no successors. Create an out
2843 set from the gen set of the exit block. This block logically has
2844 the exit block as a successor. */
2845
2846
2848 static void
2849 dse_confluence_0 (basic_block bb)
2850 {
2851 bb_info_t bb_info = bb_table[bb->index];
2852
2853 if (bb->index == EXIT_BLOCK)
2854 return;
2855
2856 if (!bb_info->out)
2857 {
2858 bb_info->out = BITMAP_ALLOC (NULL);
2859 bitmap_copy (bb_info->out, bb_table[EXIT_BLOCK]->gen);
2860 }
2861 }
2862
2863 /* Propagate the information from the in set of the dest of E to the
2864 out set of the src of E. If the various in or out sets are not
2865 there, that means they are all ones. */
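/* The confluence operator is intersection (bitmap_and_into below):
   a position is dead at the end of the src block only if it is dead
   into every successor. A missing set means all ones, which is the
   identity for intersection, so it can simply be skipped.  */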
2866
2867 static void
2868 dse_confluence_n (edge e)
2869 {
2870 bb_info_t src_info = bb_table[e->src->index];
2871 bb_info_t dest_info = bb_table[e->dest->index];
2872
2873 if (dest_info->in)
2874 {
2875 if (src_info->out)
2876 bitmap_and_into (src_info->out, dest_info->in);
2877 else
2878 {
2879 src_info->out = BITMAP_ALLOC (NULL);
2880 bitmap_copy (src_info->out, dest_info->in);
2881 }
2882 }
2883 }
2884
2885
2886 /* Propagate the info from the out to the in set of BB_INDEX's basic
2887 block. There are three cases:
2888
2889 1) The block has no kill set. In this case the kill set is all
2890 ones. It does not matter what the out set of the block is, none of
2891 the info can reach the top. The only thing that reaches the top is
2892 the gen set and we just copy the set.
2893
2894 2) There is a kill set but no out set and bb has successors. In
2895 this case we just return. Eventually an out set will be created and
2896 it is better to wait than to create a set of ones.
2897
2898 3) There is both a kill and out set. We apply the obvious transfer
2899 function.
2900 */
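/* Concretely, case 3 computes IN = GEN | (OUT & ~KILL), which is
   exactly what bitmap_ior_and_compl does below.  */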
2901
2902 static bool
2903 dse_transfer_function (int bb_index)
2904 {
2905 bb_info_t bb_info = bb_table[bb_index];
2906
2907 if (bb_info->kill)
2908 {
2909 if (bb_info->out)
2910 {
2911 /* Case 3 above. */
2912 if (bb_info->in)
2913 return bitmap_ior_and_compl (bb_info->in, bb_info->gen,
2914 bb_info->out, bb_info->kill);
2915 else
2916 {
2917 bb_info->in = BITMAP_ALLOC (NULL);
2918 bitmap_ior_and_compl (bb_info->in, bb_info->gen,
2919 bb_info->out, bb_info->kill);
2920 return true;
2921 }
2922 }
2923 else
2924 /* Case 2 above. */
2925 return false;
2926 }
2927 else
2928 {
2929 /* Case 1 above. If there is already an in set, nothing
2930 happens. */
2931 if (bb_info->in)
2932 return false;
2933 else
2934 {
2935 bb_info->in = BITMAP_ALLOC (NULL);
2936 bitmap_copy (bb_info->in, bb_info->gen);
2937 return true;
2938 }
2939 }
2940 }
2941
2942 /* Solve the dataflow equations. */
2943
2944 static void
2945 dse_step4 (void)
2946 {
2947 df_simple_dataflow (DF_BACKWARD, NULL, dse_confluence_0,
2948 dse_confluence_n, dse_transfer_function,
2949 all_blocks, df_get_postorder (DF_BACKWARD),
2950 df_get_n_blocks (DF_BACKWARD));
2951 if (dump_file)
2952 {
2953 basic_block bb;
2954
2955 fprintf (dump_file, "\n\n*** Global dataflow info after analysis.\n");
2956 FOR_ALL_BB (bb)
2957 {
2958 bb_info_t bb_info = bb_table[bb->index];
2959
2960 df_print_bb_index (bb, dump_file);
2961 if (bb_info->in)
2962 bitmap_print (dump_file, bb_info->in, " in: ", "\n");
2963 else
2964 fprintf (dump_file, " in: *MISSING*\n");
2965 if (bb_info->gen)
2966 bitmap_print (dump_file, bb_info->gen, " gen: ", "\n");
2967 else
2968 fprintf (dump_file, " gen: *MISSING*\n");
2969 if (bb_info->kill)
2970 bitmap_print (dump_file, bb_info->kill, " kill: ", "\n");
2971 else
2972 fprintf (dump_file, " kill: *MISSING*\n");
2973 if (bb_info->out)
2974 bitmap_print (dump_file, bb_info->out, " out: ", "\n");
2975 else
2976 fprintf (dump_file, " out: *MISSING*\n\n");
2977 }
2978 }
2979 }
2980
2981
2982 \f
2983 /*----------------------------------------------------------------------------
2984 Fifth step.
2985
2986 Delete the stores that can only be deleted using the global information.
2987 ----------------------------------------------------------------------------*/
2988
2989
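/* Walk the insns of each block backwards, tracking in V (seeded
   from the block's out set) the byte positions whose current values
   are known to be dead. An insn's store is deleted only when every
   byte it writes maps to a bit that is still set in V; otherwise
   its stores and reads update V just as in dse_step3_scan.  */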
2990 static void
2991 dse_step5_nospill (void)
2992 {
2993 basic_block bb;
2994 FOR_EACH_BB (bb)
2995 {
2996 bb_info_t bb_info = bb_table[bb->index];
2997 insn_info_t insn_info = bb_info->last_insn;
2998 bitmap v = bb_info->out;
2999
3000 while (insn_info)
3001 {
3002 bool deleted = false;
3003 if (dump_file && insn_info->insn)
3004 {
3005 fprintf (dump_file, "starting to process insn %d\n",
3006 INSN_UID (insn_info->insn));
3007 bitmap_print (dump_file, v, " v: ", "\n");
3008 }
3009
3010 /* There may have been code deleted by the dce pass run before
3011 this phase. */
3012 if (insn_info->insn
3013 && INSN_P (insn_info->insn)
3014 && (!insn_info->cannot_delete)
3015 && (!bitmap_empty_p (v)))
3016 {
3017 store_info_t store_info = insn_info->store_rec;
3018
3019 /* Try to delete the current insn. */
3020 deleted = true;
3021
3022 /* Skip the clobbers. */
3023 while (!store_info->is_set)
3024 store_info = store_info->next;
3025
3026 if (store_info->alias_set)
3027 deleted = false;
3028 else
3029 {
3030 HOST_WIDE_INT i;
3031 group_info_t group_info
3032 = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
3033
3034 for (i = store_info->begin; i < store_info->end; i++)
3035 {
3036 int index = get_bitmap_index (group_info, i);
3037
3038 if (dump_file)
3039 fprintf (dump_file, "i = %d, index = %d\n", (int)i, index);
3040 if (index == 0 || !bitmap_bit_p (v, index))
3041 {
3042 if (dump_file)
3043 fprintf (dump_file, "failing at i = %d\n", (int)i);
3044 deleted = false;
3045 break;
3046 }
3047 }
3048 }
3049 if (deleted)
3050 {
3051 if (dbg_cnt (dse))
3052 {
3053 check_for_inc_dec (insn_info->insn);
3054 delete_insn (insn_info->insn);
3055 insn_info->insn = NULL;
3056 globally_deleted++;
3057 }
3058 }
3059 }
3060 /* We do not want to process the local info if the insn was
3061 deleted. For instance, if the insn did a wild read, we
3062 no longer need to trash the info. */
3063 if (insn_info->insn
3064 && INSN_P (insn_info->insn)
3065 && (!deleted))
3066 {
3067 scan_stores_nospill (insn_info->store_rec, v, NULL);
3068 if (insn_info->wild_read)
3069 {
3070 if (dump_file)
3071 fprintf (dump_file, "wild read\n");
3072 bitmap_clear (v);
3073 }
3074 else if (insn_info->read_rec)
3075 {
3076 if (dump_file)
3077 fprintf (dump_file, "regular read\n");
3078 scan_reads_nospill (insn_info, v, NULL);
3079 }
3080 }
3081
3082 insn_info = insn_info->prev_insn;
3083 }
3084 }
3085 }
3086
3087
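/* The spill analogue of dse_step5_nospill: walk each block
   backwards and delete a spill store whose alias set bit is still
   set in V; otherwise let the scan_*_spill routines update V.  */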
3088 static void
3089 dse_step5_spill (void)
3090 {
3091 basic_block bb;
3092 FOR_EACH_BB (bb)
3093 {
3094 bb_info_t bb_info = bb_table[bb->index];
3095 insn_info_t insn_info = bb_info->last_insn;
3096 bitmap v = bb_info->out;
3097
3098 while (insn_info)
3099 {
3100 bool deleted = false;
3101 /* There may have been code deleted by the dce pass run before
3102 this phase. */
3103 if (insn_info->insn
3104 && INSN_P (insn_info->insn)
3105 && (!insn_info->cannot_delete)
3106 && (!bitmap_empty_p (v)))
3107 {
3108 /* Try to delete the current insn. */
3109 store_info_t store_info = insn_info->store_rec;
3110 deleted = true;
3111
3112 while (store_info)
3113 {
3114 if (store_info->alias_set)
3115 {
3116 int index = get_bitmap_index (clear_alias_group,
3117 store_info->alias_set);
3118 if (index == 0 || !bitmap_bit_p (v, index))
3119 {
3120 deleted = false;
3121 break;
3122 }
3123 }
3124 else
3125 deleted = false;
3126 store_info = store_info->next;
3127 }
3128 if (deleted && dbg_cnt (dse))
3129 {
3130 if (dump_file)
3131 fprintf (dump_file, "Spill deleting insn %d\n",
3132 INSN_UID (insn_info->insn));
3133 check_for_inc_dec (insn_info->insn);
3134 delete_insn (insn_info->insn);
3135 spill_deleted++;
3136 insn_info->insn = NULL;
3137 }
3138 }
3139
3140 if (insn_info->insn
3141 && INSN_P (insn_info->insn)
3142 && (!deleted))
3143 {
3144 scan_stores_spill (insn_info->store_rec, v, NULL);
3145 scan_reads_spill (insn_info->read_rec, v, NULL);
3146 }
3147
3148 insn_info = insn_info->prev_insn;
3149 }
3150 }
3151 }
3152
3153
3154 \f
3155 /*----------------------------------------------------------------------------
3156 Sixth step.
3157
3158 Destroy everything left standing.
3159 ----------------------------------------------------------------------------*/
3160
3161 static void
3162 dse_step6 (bool global_done)
3163 {
3164 unsigned int i;
3165 group_info_t group;
3166 basic_block bb;
3167
3168 if (global_done)
3169 {
3170 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
3171 {
3172 free (group->offset_map_n);
3173 free (group->offset_map_p);
3174 BITMAP_FREE (group->store1_n);
3175 BITMAP_FREE (group->store1_p);
3176 BITMAP_FREE (group->store2_n);
3177 BITMAP_FREE (group->store2_p);
3178 BITMAP_FREE (group->group_kill);
3179 }
3180
3181 FOR_ALL_BB (bb)
3182 {
3183 bb_info_t bb_info = bb_table[bb->index];
3184 BITMAP_FREE (bb_info->gen);
3185 if (bb_info->kill)
3186 BITMAP_FREE (bb_info->kill);
3187 if (bb_info->in)
3188 BITMAP_FREE (bb_info->in);
3189 if (bb_info->out)
3190 BITMAP_FREE (bb_info->out);
3191 }
3192 }
3193 else
3194 {
3195 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
3196 {
3197 BITMAP_FREE (group->store1_n);
3198 BITMAP_FREE (group->store1_p);
3199 BITMAP_FREE (group->store2_n);
3200 BITMAP_FREE (group->store2_p);
3201 BITMAP_FREE (group->group_kill);
3202 }
3203 }
3204
3205 if (clear_alias_sets)
3206 {
3207 BITMAP_FREE (clear_alias_sets);
3208 BITMAP_FREE (disqualified_clear_alias_sets);
3209 free_alloc_pool (clear_alias_mode_pool);
3210 htab_delete (clear_alias_mode_table);
3211 }
3212
3213 end_alias_analysis ();
3214 free (bb_table);
3215 htab_delete (rtx_group_table);
3216 VEC_free (group_info_t, heap, rtx_group_vec);
3217 BITMAP_FREE (all_blocks);
3218 BITMAP_FREE (scratch);
3219
3220 free_alloc_pool (rtx_store_info_pool);
3221 free_alloc_pool (read_info_pool);
3222 free_alloc_pool (insn_info_pool);
3223 free_alloc_pool (bb_info_pool);
3224 free_alloc_pool (rtx_group_info_pool);
3225 free_alloc_pool (deferred_change_pool);
3226 }
3227
3228
3229
3230 /* -------------------------------------------------------------------------
3231 DSE
3232 ------------------------------------------------------------------------- */
3233
3234 /* Callback for running pass_rtl_dse. */
3235
3236 static unsigned int
3237 rest_of_handle_dse (void)
3238 {
3239 bool did_global = false;
3240
3241 df_set_flags (DF_DEFER_INSN_RESCAN);
3242
3243 dse_step0 ();
3244 dse_step1 ();
3245 dse_step2_init ();
3246 if (dse_step2_nospill ())
3247 {
3248 df_set_flags (DF_LR_RUN_DCE);
3249 df_analyze ();
3250 did_global = true;
3251 if (dump_file)
3252 fprintf (dump_file, "doing global processing\n");
3253 dse_step3 (false);
3254 dse_step4 ();
3255 dse_step5_nospill ();
3256 }
3257
3258 /* For the instance of dse that runs after reload, we make a special
3259 pass to process the spills. These are special in that they are
3260 totally transparent, i.e., there are no aliasing issues that need
3261 to be considered. This means that the wild reads that kill
3262 everything else do not apply here. */
3263 if (clear_alias_sets && dse_step2_spill ())
3264 {
3265 if (!did_global)
3266 {
3267 df_set_flags (DF_LR_RUN_DCE);
3268 df_analyze ();
3269 }
3270 did_global = true;
3271 if (dump_file)
3272 fprintf (dump_file, "doing global spill processing\n");
3273 dse_step3 (true);
3274 dse_step4 ();
3275 dse_step5_spill ();
3276 }
3277
3278 dse_step6 (did_global);
3279
3280 if (dump_file)
3281 fprintf (dump_file, "dse: local deletions = %d, global deletions = %d, spill deletions = %d\n",
3282 locally_deleted, globally_deleted, spill_deleted);
3283 return 0;
3284 }
3285
3286 static bool
3287 gate_dse (void)
3288 {
3289 return gate_dse1 () || gate_dse2 ();
3290 }
3291
3292 static bool
3293 gate_dse1 (void)
3294 {
3295 return optimize > 0 && flag_dse
3296 && dbg_cnt (dse1);
3297 }
3298
3299 static bool
3300 gate_dse2 (void)
3301 {
3302 return optimize > 0 && flag_dse
3303 && dbg_cnt (dse2);
3304 }
3305
3306 struct tree_opt_pass pass_rtl_dse1 =
3307 {
3308 "dse1", /* name */
3309 gate_dse1, /* gate */
3310 rest_of_handle_dse, /* execute */
3311 NULL, /* sub */
3312 NULL, /* next */
3313 0, /* static_pass_number */
3314 TV_DSE1, /* tv_id */
3315 0, /* properties_required */
3316 0, /* properties_provided */
3317 0, /* properties_destroyed */
3318 0, /* todo_flags_start */
3319 TODO_dump_func |
3320 TODO_df_finish | TODO_verify_rtl_sharing |
3321 TODO_ggc_collect, /* todo_flags_finish */
3322 'w' /* letter */
3323 };
3324
3325 struct tree_opt_pass pass_rtl_dse2 =
3326 {
3327 "dse2", /* name */
3328 gate_dse2, /* gate */
3329 rest_of_handle_dse, /* execute */
3330 NULL, /* sub */
3331 NULL, /* next */
3332 0, /* static_pass_number */
3333 TV_DSE2, /* tv_id */
3334 0, /* properties_required */
3335 0, /* properties_provided */
3336 0, /* properties_destroyed */
3337 0, /* todo_flags_start */
3338 TODO_dump_func |
3339 TODO_df_finish | TODO_verify_rtl_sharing |
3340 TODO_ggc_collect, /* todo_flags_finish */
3341 'w' /* letter */
3342 };