gcc/postreload.c
1 /* Perform simple optimizations to clean up the result of reload.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24
25 #include "machmode.h"
26 #include "hard-reg-set.h"
27 #include "rtl.h"
28 #include "tm_p.h"
29 #include "obstack.h"
30 #include "insn-config.h"
31 #include "flags.h"
32 #include "hashtab.h"
33 #include "hash-set.h"
34 #include "vec.h"
35 #include "input.h"
36 #include "function.h"
37 #include "symtab.h"
38 #include "expr.h"
39 #include "insn-codes.h"
40 #include "optabs.h"
41 #include "regs.h"
42 #include "predict.h"
43 #include "dominance.h"
44 #include "cfg.h"
45 #include "cfgrtl.h"
46 #include "cfgbuild.h"
47 #include "cfgcleanup.h"
48 #include "basic-block.h"
49 #include "reload.h"
50 #include "recog.h"
51 #include "cselib.h"
52 #include "diagnostic-core.h"
53 #include "except.h"
54 #include "double-int.h"
56 #include "alias.h"
58 #include "wide-int.h"
59 #include "inchash.h"
60 #include "tree.h"
61 #include "target.h"
62 #include "tree-pass.h"
63 #include "df.h"
64 #include "dbgcnt.h"
65
66 static int reload_cse_noop_set_p (rtx);
67 static bool reload_cse_simplify (rtx_insn *, rtx);
68 static void reload_cse_regs_1 (void);
69 static int reload_cse_simplify_set (rtx, rtx_insn *);
70 static int reload_cse_simplify_operands (rtx_insn *, rtx);
71
72 static void reload_combine (void);
73 static void reload_combine_note_use (rtx *, rtx_insn *, int, rtx);
74 static void reload_combine_note_store (rtx, const_rtx, void *);
75
76 static bool reload_cse_move2add (rtx_insn *);
77 static void move2add_note_store (rtx, const_rtx, void *);
78
79 /* Call the cse / combine-like post-reload optimization phases.
80 FIRST is the first instruction. */
81
82 static void
83 reload_cse_regs (rtx_insn *first ATTRIBUTE_UNUSED)
84 {
85 bool moves_converted;
86 reload_cse_regs_1 ();
87 reload_combine ();
88 moves_converted = reload_cse_move2add (first);
89 if (flag_expensive_optimizations)
90 {
91 if (moves_converted)
92 reload_combine ();
93 reload_cse_regs_1 ();
94 }
95 }
96
97 /* See whether a single set SET is a noop. */
98 static int
99 reload_cse_noop_set_p (rtx set)
100 {
101 if (cselib_reg_set_mode (SET_DEST (set)) != GET_MODE (SET_DEST (set)))
102 return 0;
103
104 return rtx_equal_for_cselib_p (SET_DEST (set), SET_SRC (set));
105 }
106
107 /* Try to simplify INSN. Return true if the CFG may have changed. */
108 static bool
109 reload_cse_simplify (rtx_insn *insn, rtx testreg)
110 {
111 rtx body = PATTERN (insn);
112 basic_block insn_bb = BLOCK_FOR_INSN (insn);
113 unsigned insn_bb_succs = EDGE_COUNT (insn_bb->succs);
114
115 if (GET_CODE (body) == SET)
116 {
117 int count = 0;
118
119 /* Simplify even if we may think it is a no-op.
120 We may think a memory load of a value smaller than WORD_SIZE
121 is redundant because we haven't taken into account possible
122 implicit extension. reload_cse_simplify_set() will bring
123 this out, so it's safer to simplify before we delete. */
124 count += reload_cse_simplify_set (body, insn);
125
126 if (!count && reload_cse_noop_set_p (body))
127 {
128 rtx value = SET_DEST (body);
129 if (REG_P (value)
130 && ! REG_FUNCTION_VALUE_P (value))
131 value = 0;
132 if (check_for_inc_dec (insn))
133 delete_insn_and_edges (insn);
134 /* We're done with this insn. */
135 goto done;
136 }
137
138 if (count > 0)
139 apply_change_group ();
140 else
141 reload_cse_simplify_operands (insn, testreg);
142 }
143 else if (GET_CODE (body) == PARALLEL)
144 {
145 int i;
146 int count = 0;
147 rtx value = NULL_RTX;
148
149 /* Registers mentioned in the clobber list for an asm cannot be reused
150 within the body of the asm. Invalidate those registers now so that
151 we don't try to substitute values for them. */
152 if (asm_noperands (body) >= 0)
153 {
154 for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
155 {
156 rtx part = XVECEXP (body, 0, i);
157 if (GET_CODE (part) == CLOBBER && REG_P (XEXP (part, 0)))
158 cselib_invalidate_rtx (XEXP (part, 0));
159 }
160 }
161
162 /* If every action in a PARALLEL is a noop, we can delete
163 the entire PARALLEL. */
164 for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
165 {
166 rtx part = XVECEXP (body, 0, i);
167 if (GET_CODE (part) == SET)
168 {
169 if (! reload_cse_noop_set_p (part))
170 break;
171 if (REG_P (SET_DEST (part))
172 && REG_FUNCTION_VALUE_P (SET_DEST (part)))
173 {
174 if (value)
175 break;
176 value = SET_DEST (part);
177 }
178 }
179 else if (GET_CODE (part) != CLOBBER)
180 break;
181 }
182
183 if (i < 0)
184 {
185 if (check_for_inc_dec (insn))
186 delete_insn_and_edges (insn);
187 /* We're done with this insn. */
188 goto done;
189 }
190
191 /* It's not a no-op, but we can try to simplify it. */
192 for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
193 if (GET_CODE (XVECEXP (body, 0, i)) == SET)
194 count += reload_cse_simplify_set (XVECEXP (body, 0, i), insn);
195
196 if (count > 0)
197 apply_change_group ();
198 else
199 reload_cse_simplify_operands (insn, testreg);
200 }
201
202 done:
203 return (EDGE_COUNT (insn_bb->succs) != insn_bb_succs);
204 }
205
206 /* Do a very simple CSE pass over the hard registers.
207
208 This function detects no-op moves where we happened to assign two
209 different pseudo-registers to the same hard register, and then
210 copied one to the other. Reload will generate a useless
211 instruction copying a register to itself.
212
213 This function also detects cases where we load a value from memory
214 into two different registers, and (if memory is more expensive than
215 registers) changes it to simply copy the first register into the
216 second register.
217
218 Another optimization is performed that scans the operands of each
219 instruction to see whether the value is already available in a
220 hard register. It then replaces the operand with the hard register
221 if possible, much like an optional reload would. */
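/* For illustration only (hypothetical RTL, not taken from this file):
   after reload we might see

       (set (reg:SI 1) (mem:SI (reg:SI 4)))
       ...
       (set (reg:SI 2) (mem:SI (reg:SI 4)))

   and, when the memory load is more expensive than a register copy,
   the second insn can be rewritten as

       (set (reg:SI 2) (reg:SI 1)).  */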
222
223 static void
224 reload_cse_regs_1 (void)
225 {
226 bool cfg_changed = false;
227 basic_block bb;
228 rtx_insn *insn;
229 rtx testreg = gen_rtx_REG (VOIDmode, -1);
230
231 cselib_init (CSELIB_RECORD_MEMORY);
232 init_alias_analysis ();
233
234 FOR_EACH_BB_FN (bb, cfun)
235 FOR_BB_INSNS (bb, insn)
236 {
237 if (INSN_P (insn))
238 cfg_changed |= reload_cse_simplify (insn, testreg);
239
240 cselib_process_insn (insn);
241 }
242
243 /* Clean up. */
244 end_alias_analysis ();
245 cselib_finish ();
246 if (cfg_changed)
247 cleanup_cfg (0);
248 }
249
250 /* Try to simplify a single SET instruction. SET is the set pattern.
251 INSN is the instruction it came from.
252 This function only handles one case: if we set a register to a value
253 which is not a register, we try to find that value in some other register
254 and change the set into a register copy. */
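/* For illustration only (hypothetical): if cselib knows that (reg:SI 3)
   currently holds the constant 5, then

       (set (reg:SI 1) (const_int 5))

   may become (set (reg:SI 1) (reg:SI 3)) whenever the register move is
   no more expensive than loading the constant.  */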
255
256 static int
257 reload_cse_simplify_set (rtx set, rtx_insn *insn)
258 {
259 int did_change = 0;
260 int dreg;
261 rtx src;
262 reg_class_t dclass;
263 int old_cost;
264 cselib_val *val;
265 struct elt_loc_list *l;
266 #ifdef LOAD_EXTEND_OP
267 enum rtx_code extend_op = UNKNOWN;
268 #endif
269 bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
270
271 dreg = true_regnum (SET_DEST (set));
272 if (dreg < 0)
273 return 0;
274
275 src = SET_SRC (set);
276 if (side_effects_p (src) || true_regnum (src) >= 0)
277 return 0;
278
279 dclass = REGNO_REG_CLASS (dreg);
280
281 #ifdef LOAD_EXTEND_OP
282 /* When replacing a memory with a register, we need to honor assumptions
283 that combine made wrt the contents of sign bits. We'll do this by
284 generating an extend instruction instead of a reg->reg copy. Thus
285 the destination must be a register that we can widen. */
286 if (MEM_P (src)
287 && GET_MODE_BITSIZE (GET_MODE (src)) < BITS_PER_WORD
288 && (extend_op = LOAD_EXTEND_OP (GET_MODE (src))) != UNKNOWN
289 && !REG_P (SET_DEST (set)))
290 return 0;
291 #endif
292
293 val = cselib_lookup (src, GET_MODE (SET_DEST (set)), 0, VOIDmode);
294 if (! val)
295 return 0;
296
297 /* If memory loads are cheaper than register copies, don't change them. */
298 if (MEM_P (src))
299 old_cost = memory_move_cost (GET_MODE (src), dclass, true);
300 else if (REG_P (src))
301 old_cost = register_move_cost (GET_MODE (src),
302 REGNO_REG_CLASS (REGNO (src)), dclass);
303 else
304 old_cost = set_src_cost (src, speed);
305
306 for (l = val->locs; l; l = l->next)
307 {
308 rtx this_rtx = l->loc;
309 int this_cost;
310
311 if (CONSTANT_P (this_rtx) && ! references_value_p (this_rtx, 0))
312 {
313 #ifdef LOAD_EXTEND_OP
314 if (extend_op != UNKNOWN)
315 {
316 wide_int result;
317
318 if (!CONST_SCALAR_INT_P (this_rtx))
319 continue;
320
321 switch (extend_op)
322 {
323 case ZERO_EXTEND:
324 result = wide_int::from (std::make_pair (this_rtx,
325 GET_MODE (src)),
326 BITS_PER_WORD, UNSIGNED);
327 break;
328 case SIGN_EXTEND:
329 result = wide_int::from (std::make_pair (this_rtx,
330 GET_MODE (src)),
331 BITS_PER_WORD, SIGNED);
332 break;
333 default:
334 gcc_unreachable ();
335 }
336 this_rtx = immed_wide_int_const (result, word_mode);
337 }
338 #endif
339 this_cost = set_src_cost (this_rtx, speed);
340 }
341 else if (REG_P (this_rtx))
342 {
343 #ifdef LOAD_EXTEND_OP
344 if (extend_op != UNKNOWN)
345 {
346 this_rtx = gen_rtx_fmt_e (extend_op, word_mode, this_rtx);
347 this_cost = set_src_cost (this_rtx, speed);
348 }
349 else
350 #endif
351 this_cost = register_move_cost (GET_MODE (this_rtx),
352 REGNO_REG_CLASS (REGNO (this_rtx)),
353 dclass);
354 }
355 else
356 continue;
357
358 /* If equal costs, prefer registers over anything else. That
359 tends to lead to smaller instructions on some machines. */
360 if (this_cost < old_cost
361 || (this_cost == old_cost
362 && REG_P (this_rtx)
363 && !REG_P (SET_SRC (set))))
364 {
365 #ifdef LOAD_EXTEND_OP
366 if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (set))) < BITS_PER_WORD
367 && extend_op != UNKNOWN
368 #ifdef CANNOT_CHANGE_MODE_CLASS
369 && !CANNOT_CHANGE_MODE_CLASS (GET_MODE (SET_DEST (set)),
370 word_mode,
371 REGNO_REG_CLASS (REGNO (SET_DEST (set))))
372 #endif
373 )
374 {
375 rtx wide_dest = gen_rtx_REG (word_mode, REGNO (SET_DEST (set)));
376 ORIGINAL_REGNO (wide_dest) = ORIGINAL_REGNO (SET_DEST (set));
377 validate_change (insn, &SET_DEST (set), wide_dest, 1);
378 }
379 #endif
380
381 validate_unshare_change (insn, &SET_SRC (set), this_rtx, 1);
382 old_cost = this_cost, did_change = 1;
383 }
384 }
385
386 return did_change;
387 }
388
389 /* Try to replace operands in INSN with equivalent values that are already
390 in registers. This can be viewed as optional reloading.
391
392 For each non-register operand in the insn, see if any hard regs are
393 known to be equivalent to that operand. Record the alternatives which
394 can accept these hard registers. Among all alternatives, select the
395 ones which are better or equal to the one currently matching, where
396 "better" is in terms of '?' and '!' constraints. Among the remaining
397 alternatives, select the one which replaces most operands with
398 hard registers. */
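/* For illustration only (hypothetical constraint string): with
   constraints "r,?m" the second alternative accumulates a reject value
   of 3 (each '?' adds 3, each '!' adds 300, see below), so it survives
   only if its reject value does not exceed that of the alternative that
   currently matches; among the survivors, lower reject wins, and ties
   are broken in favor of the alternative that turns the most operands
   into hard registers.  */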
399
400 static int
401 reload_cse_simplify_operands (rtx_insn *insn, rtx testreg)
402 {
403 int i, j;
404
405 /* For each operand, all registers that are equivalent to it. */
406 HARD_REG_SET equiv_regs[MAX_RECOG_OPERANDS];
407
408 const char *constraints[MAX_RECOG_OPERANDS];
409
410 /* Vector recording how bad an alternative is. */
411 int *alternative_reject;
412 /* Vector recording how many registers can be introduced by choosing
413 this alternative. */
414 int *alternative_nregs;
415 /* Array of vectors recording, for each operand and each alternative,
416 which hard register to substitute, or -1 if the operand should be
417 left as it is. */
418 int *op_alt_regno[MAX_RECOG_OPERANDS];
419 /* Array of alternatives, sorted in order of decreasing desirability. */
420 int *alternative_order;
421
422 extract_constrain_insn (insn);
423
424 if (recog_data.n_alternatives == 0 || recog_data.n_operands == 0)
425 return 0;
426
427 alternative_reject = XALLOCAVEC (int, recog_data.n_alternatives);
428 alternative_nregs = XALLOCAVEC (int, recog_data.n_alternatives);
429 alternative_order = XALLOCAVEC (int, recog_data.n_alternatives);
430 memset (alternative_reject, 0, recog_data.n_alternatives * sizeof (int));
431 memset (alternative_nregs, 0, recog_data.n_alternatives * sizeof (int));
432
433 /* For each operand, find out which regs are equivalent. */
434 for (i = 0; i < recog_data.n_operands; i++)
435 {
436 cselib_val *v;
437 struct elt_loc_list *l;
438 rtx op;
439
440 CLEAR_HARD_REG_SET (equiv_regs[i]);
441
442 /* cselib blows up on CODE_LABELs. Trying to fix that doesn't seem
443 right, so avoid the problem here. Likewise if we have a constant
444 and the insn pattern doesn't tell us the mode we need. */
445 if (LABEL_P (recog_data.operand[i])
446 || (CONSTANT_P (recog_data.operand[i])
447 && recog_data.operand_mode[i] == VOIDmode))
448 continue;
449
450 op = recog_data.operand[i];
451 #ifdef LOAD_EXTEND_OP
452 if (MEM_P (op)
453 && GET_MODE_BITSIZE (GET_MODE (op)) < BITS_PER_WORD
454 && LOAD_EXTEND_OP (GET_MODE (op)) != UNKNOWN)
455 {
456 rtx set = single_set (insn);
457
458 /* We might have multiple sets, some of which do implicit
459 extension. Punt on this for now. */
460 if (! set)
461 continue;
462 /* If the destination is also a MEM or a STRICT_LOW_PART, no
463 extension applies.
464 Also, if there is an explicit extension, we don't have to
465 worry about an implicit one. */
466 else if (MEM_P (SET_DEST (set))
467 || GET_CODE (SET_DEST (set)) == STRICT_LOW_PART
468 || GET_CODE (SET_SRC (set)) == ZERO_EXTEND
469 || GET_CODE (SET_SRC (set)) == SIGN_EXTEND)
470 ; /* Continue ordinary processing. */
471 #ifdef CANNOT_CHANGE_MODE_CLASS
472 /* If the register cannot change mode to word_mode, it follows that
473 it cannot have been used in word_mode. */
474 else if (REG_P (SET_DEST (set))
475 && CANNOT_CHANGE_MODE_CLASS (GET_MODE (SET_DEST (set)),
476 word_mode,
477 REGNO_REG_CLASS (REGNO (SET_DEST (set)))))
478 ; /* Continue ordinary processing. */
479 #endif
480 /* If this is a straight load, make the extension explicit. */
481 else if (REG_P (SET_DEST (set))
482 && recog_data.n_operands == 2
483 && SET_SRC (set) == op
484 && SET_DEST (set) == recog_data.operand[1-i])
485 {
486 validate_change (insn, recog_data.operand_loc[i],
487 gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (op)),
488 word_mode, op),
489 1);
490 validate_change (insn, recog_data.operand_loc[1-i],
491 gen_rtx_REG (word_mode, REGNO (SET_DEST (set))),
492 1);
493 if (! apply_change_group ())
494 return 0;
495 return reload_cse_simplify_operands (insn, testreg);
496 }
497 else
498 /* ??? There might be arithmetic operations with memory that are
499 safe to optimize, but is it worth the trouble? */
500 continue;
501 }
502 #endif /* LOAD_EXTEND_OP */
503 if (side_effects_p (op))
504 continue;
505 v = cselib_lookup (op, recog_data.operand_mode[i], 0, VOIDmode);
506 if (! v)
507 continue;
508
509 for (l = v->locs; l; l = l->next)
510 if (REG_P (l->loc))
511 SET_HARD_REG_BIT (equiv_regs[i], REGNO (l->loc));
512 }
513
514 alternative_mask preferred = get_preferred_alternatives (insn);
515 for (i = 0; i < recog_data.n_operands; i++)
516 {
517 machine_mode mode;
518 int regno;
519 const char *p;
520
521 op_alt_regno[i] = XALLOCAVEC (int, recog_data.n_alternatives);
522 for (j = 0; j < recog_data.n_alternatives; j++)
523 op_alt_regno[i][j] = -1;
524
525 p = constraints[i] = recog_data.constraints[i];
526 mode = recog_data.operand_mode[i];
527
528 /* Add the reject values for each alternative given by the constraints
529 for this operand. */
530 j = 0;
531 while (*p != '\0')
532 {
533 char c = *p++;
534 if (c == ',')
535 j++;
536 else if (c == '?')
537 alternative_reject[j] += 3;
538 else if (c == '!')
539 alternative_reject[j] += 300;
540 }
541
542 /* We won't change operands which are already registers. We
543 also don't want to modify output operands. */
544 regno = true_regnum (recog_data.operand[i]);
545 if (regno >= 0
546 || constraints[i][0] == '='
547 || constraints[i][0] == '+')
548 continue;
549
550 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
551 {
552 enum reg_class rclass = NO_REGS;
553
554 if (! TEST_HARD_REG_BIT (equiv_regs[i], regno))
555 continue;
556
557 SET_REGNO_RAW (testreg, regno);
558 PUT_MODE (testreg, mode);
559
560 /* We found a register equal to this operand. Now look for all
561 alternatives that can accept this register and have not been
562 assigned a register they can use yet. */
563 j = 0;
564 p = constraints[i];
565 for (;;)
566 {
567 char c = *p;
568
569 switch (c)
570 {
571 case 'g':
572 rclass = reg_class_subunion[rclass][GENERAL_REGS];
573 break;
574
575 default:
576 rclass
577 = (reg_class_subunion
578 [rclass]
579 [reg_class_for_constraint (lookup_constraint (p))]);
580 break;
581
582 case ',': case '\0':
583 /* See if REGNO fits this alternative, and set it up as the
584 replacement register if we don't have one for this
585 alternative yet and the operand being replaced is not
586 a cheap CONST_INT. */
587 if (op_alt_regno[i][j] == -1
588 && TEST_BIT (preferred, j)
589 && reg_fits_class_p (testreg, rclass, 0, mode)
590 && (!CONST_INT_P (recog_data.operand[i])
591 || (set_src_cost (recog_data.operand[i],
592 optimize_bb_for_speed_p
593 (BLOCK_FOR_INSN (insn)))
594 > set_src_cost (testreg,
595 optimize_bb_for_speed_p
596 (BLOCK_FOR_INSN (insn))))))
597 {
598 alternative_nregs[j]++;
599 op_alt_regno[i][j] = regno;
600 }
601 j++;
602 rclass = NO_REGS;
603 break;
604 }
605 p += CONSTRAINT_LEN (c, p);
606
607 if (c == '\0')
608 break;
609 }
610 }
611 }
612
613 /* Record all alternatives which are better or equal to the currently
614 matching one in the alternative_order array. */
615 for (i = j = 0; i < recog_data.n_alternatives; i++)
616 if (alternative_reject[i] <= alternative_reject[which_alternative])
617 alternative_order[j++] = i;
618 recog_data.n_alternatives = j;
619
620 /* Sort it. Given a small number of alternatives, a dumb algorithm
621 won't hurt too much. */
622 for (i = 0; i < recog_data.n_alternatives - 1; i++)
623 {
624 int best = i;
625 int best_reject = alternative_reject[alternative_order[i]];
626 int best_nregs = alternative_nregs[alternative_order[i]];
627 int tmp;
628
629 for (j = i + 1; j < recog_data.n_alternatives; j++)
630 {
631 int this_reject = alternative_reject[alternative_order[j]];
632 int this_nregs = alternative_nregs[alternative_order[j]];
633
634 if (this_reject < best_reject
635 || (this_reject == best_reject && this_nregs > best_nregs))
636 {
637 best = j;
638 best_reject = this_reject;
639 best_nregs = this_nregs;
640 }
641 }
642
643 tmp = alternative_order[best];
644 alternative_order[best] = alternative_order[i];
645 alternative_order[i] = tmp;
646 }
647
648 /* Substitute the operands as determined by op_alt_regno for the best
649 alternative. */
650 j = alternative_order[0];
651
652 for (i = 0; i < recog_data.n_operands; i++)
653 {
654 machine_mode mode = recog_data.operand_mode[i];
655 if (op_alt_regno[i][j] == -1)
656 continue;
657
658 validate_change (insn, recog_data.operand_loc[i],
659 gen_rtx_REG (mode, op_alt_regno[i][j]), 1);
660 }
661
662 for (i = recog_data.n_dups - 1; i >= 0; i--)
663 {
664 int op = recog_data.dup_num[i];
665 machine_mode mode = recog_data.operand_mode[op];
666
667 if (op_alt_regno[op][j] == -1)
668 continue;
669
670 validate_change (insn, recog_data.dup_loc[i],
671 gen_rtx_REG (mode, op_alt_regno[op][j]), 1);
672 }
673
674 return apply_change_group ();
675 }
676 \f
677 /* If reload couldn't use reg+reg+offset addressing, try to use reg+reg
678 addressing now.
679 This code might also be useful when reload gave up on reg+reg addressing
680 because of clashes between the return register and INDEX_REG_CLASS. */
681
682 /* The maximum number of uses of a register we can keep track of to
683 replace them with reg+reg addressing. */
684 #define RELOAD_COMBINE_MAX_USES 16
685
686 /* Describes a recorded use of a register. */
687 struct reg_use
688 {
689 /* The insn where a register has been used. */
690 rtx_insn *insn;
691 /* Points to the memory reference enclosing the use, if any, NULL_RTX
692 otherwise. */
693 rtx containing_mem;
694 /* Location of the register within INSN. */
695 rtx *usep;
696 /* The reverse uid of the insn. */
697 int ruid;
698 };
699
700 /* If the register is used in some unknown fashion, USE_INDEX is negative.
701 If it is dead, USE_INDEX is RELOAD_COMBINE_MAX_USES, and STORE_RUID
702 indicates where it is first set or clobbered.
703 Otherwise, USE_INDEX is the index of the last encountered use of the
704 register (which is first among these we have seen since we scan backwards).
705 USE_RUID indicates the first encountered, i.e. last, of these uses.
706 If ALL_OFFSETS_MATCH is true, all encountered uses were inside a PLUS
707 with a constant offset; OFFSET contains this constant in that case.
708 STORE_RUID is always meaningful if we only want to use a value in a
709 register in a different place: it denotes the next insn in the insn
710 stream (i.e. the last encountered) that sets or clobbers the register.
711 REAL_STORE_RUID is similar, but clobbers are ignored when updating it. */
712 static struct
713 {
714 struct reg_use reg_use[RELOAD_COMBINE_MAX_USES];
715 rtx offset;
716 int use_index;
717 int store_ruid;
718 int real_store_ruid;
719 int use_ruid;
720 bool all_offsets_match;
721 } reg_state[FIRST_PSEUDO_REGISTER];
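/* For illustration only (hypothetical walk-through): since the scan runs
   backwards, a register not yet seen starts out "dead" with use_index ==
   RELOAD_COMBINE_MAX_USES.  Its first recorded use drops use_index to
   RELOAD_COMBINE_MAX_USES - 1, each further use fills the next lower
   slot, and once use_index is decremented past zero the table is full
   and the register is treated as used in an unknown fashion
   (use_index == -1).  */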
722
723 /* Reverse linear uid. This is increased in reload_combine while scanning
724 the instructions from last to first. It is used to set last_label_ruid
725 and the store_ruid / use_ruid fields in reg_state. */
726 static int reload_combine_ruid;
727
728 /* The RUID of the last label we encountered in reload_combine. */
729 static int last_label_ruid;
730
731 /* The RUID of the last jump we encountered in reload_combine. */
732 static int last_jump_ruid;
733
734 /* The register numbers of the first and last index register. A value of
735 -1 in LAST_INDEX_REG indicates that we've previously computed these
736 values and found no suitable index registers. */
737 static int first_index_reg = -1;
738 static int last_index_reg;
739
740 #define LABEL_LIVE(LABEL) \
741 (label_live[CODE_LABEL_NUMBER (LABEL) - min_labelno])
742
743 /* Subroutine of reload_combine_split_ruids, called to fix up a single
744 ruid pointed to by *PRUID if it is higher than SPLIT_RUID. */
745
746 static inline void
747 reload_combine_split_one_ruid (int *pruid, int split_ruid)
748 {
749 if (*pruid > split_ruid)
750 (*pruid)++;
751 }
752
753 /* Called when we insert a new insn in a position we've already passed in
754 the scan. Examine all our state, increasing all ruids that are higher
755 than SPLIT_RUID by one in order to make room for a new insn. */
756
757 static void
758 reload_combine_split_ruids (int split_ruid)
759 {
760 unsigned i;
761
762 reload_combine_split_one_ruid (&reload_combine_ruid, split_ruid);
763 reload_combine_split_one_ruid (&last_label_ruid, split_ruid);
764 reload_combine_split_one_ruid (&last_jump_ruid, split_ruid);
765
766 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
767 {
768 int j, idx = reg_state[i].use_index;
769 reload_combine_split_one_ruid (&reg_state[i].use_ruid, split_ruid);
770 reload_combine_split_one_ruid (&reg_state[i].store_ruid, split_ruid);
771 reload_combine_split_one_ruid (&reg_state[i].real_store_ruid,
772 split_ruid);
773 if (idx < 0)
774 continue;
775 for (j = idx; j < RELOAD_COMBINE_MAX_USES; j++)
776 {
777 reload_combine_split_one_ruid (&reg_state[i].reg_use[j].ruid,
778 split_ruid);
779 }
780 }
781 }
782
783 /* Called when we are about to rescan a previously encountered insn with
784 reload_combine_note_use after modifying some part of it. This clears all
785 information about uses in that particular insn. */
786
787 static void
788 reload_combine_purge_insn_uses (rtx_insn *insn)
789 {
790 unsigned i;
791
792 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
793 {
794 int j, k, idx = reg_state[i].use_index;
795 if (idx < 0)
796 continue;
797 j = k = RELOAD_COMBINE_MAX_USES;
798 while (j-- > idx)
799 {
800 if (reg_state[i].reg_use[j].insn != insn)
801 {
802 k--;
803 if (k != j)
804 reg_state[i].reg_use[k] = reg_state[i].reg_use[j];
805 }
806 }
807 reg_state[i].use_index = k;
808 }
809 }
810
811 /* Called when we need to forget about all uses of REGNO after an insn
812 which is identified by RUID. */
813
814 static void
815 reload_combine_purge_reg_uses_after_ruid (unsigned regno, int ruid)
816 {
817 int j, k, idx = reg_state[regno].use_index;
818 if (idx < 0)
819 return;
820 j = k = RELOAD_COMBINE_MAX_USES;
821 while (j-- > idx)
822 {
823 if (reg_state[regno].reg_use[j].ruid >= ruid)
824 {
825 k--;
826 if (k != j)
827 reg_state[regno].reg_use[k] = reg_state[regno].reg_use[j];
828 }
829 }
830 reg_state[regno].use_index = k;
831 }
832
833 /* Find the use of REGNO with the ruid that is highest among those
834 lower than RUID_LIMIT, and return it if it is the only use of this
835 reg in the insn. Return NULL otherwise. */
836
837 static struct reg_use *
838 reload_combine_closest_single_use (unsigned regno, int ruid_limit)
839 {
840 int i, best_ruid = 0;
841 int use_idx = reg_state[regno].use_index;
842 struct reg_use *retval;
843
844 if (use_idx < 0)
845 return NULL;
846 retval = NULL;
847 for (i = use_idx; i < RELOAD_COMBINE_MAX_USES; i++)
848 {
849 struct reg_use *use = reg_state[regno].reg_use + i;
850 int this_ruid = use->ruid;
851 if (this_ruid >= ruid_limit)
852 continue;
853 if (this_ruid > best_ruid)
854 {
855 best_ruid = this_ruid;
856 retval = use;
857 }
858 else if (this_ruid == best_ruid)
859 retval = NULL;
860 }
861 if (last_label_ruid >= best_ruid)
862 return NULL;
863 return retval;
864 }
865
866 /* After we've moved an add insn, fix up any debug insns that occur
867 between the old location of the add and the new location. REG is
868 the destination register of the add insn; REPLACEMENT is the
869 SET_SRC of the add. FROM and TO specify the range in which we
870 should make this change on debug insns. */
871
872 static void
873 fixup_debug_insns (rtx reg, rtx replacement, rtx_insn *from, rtx_insn *to)
874 {
875 rtx_insn *insn;
876 for (insn = from; insn != to; insn = NEXT_INSN (insn))
877 {
878 rtx t;
879
880 if (!DEBUG_INSN_P (insn))
881 continue;
882
883 t = INSN_VAR_LOCATION_LOC (insn);
884 t = simplify_replace_rtx (t, reg, replacement);
885 validate_change (insn, &INSN_VAR_LOCATION_LOC (insn), t, 0);
886 }
887 }
888
889 /* Subroutine of reload_combine_recognize_const_pattern. Try to replace REG
890 with SRC in the insn described by USE, taking costs into account. Return
891 true if we made the replacement. */
892
893 static bool
894 try_replace_in_use (struct reg_use *use, rtx reg, rtx src)
895 {
896 rtx_insn *use_insn = use->insn;
897 rtx mem = use->containing_mem;
898 bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (use_insn));
899
900 if (mem != NULL_RTX)
901 {
902 addr_space_t as = MEM_ADDR_SPACE (mem);
903 rtx oldaddr = XEXP (mem, 0);
904 rtx newaddr = NULL_RTX;
905 int old_cost = address_cost (oldaddr, GET_MODE (mem), as, speed);
906 int new_cost;
907
908 newaddr = simplify_replace_rtx (oldaddr, reg, src);
909 if (memory_address_addr_space_p (GET_MODE (mem), newaddr, as))
910 {
911 XEXP (mem, 0) = newaddr;
912 new_cost = address_cost (newaddr, GET_MODE (mem), as, speed);
913 XEXP (mem, 0) = oldaddr;
914 if (new_cost <= old_cost
915 && validate_change (use_insn,
916 &XEXP (mem, 0), newaddr, 0))
917 return true;
918 }
919 }
920 else
921 {
922 rtx new_set = single_set (use_insn);
923 if (new_set
924 && REG_P (SET_DEST (new_set))
925 && GET_CODE (SET_SRC (new_set)) == PLUS
926 && REG_P (XEXP (SET_SRC (new_set), 0))
927 && CONSTANT_P (XEXP (SET_SRC (new_set), 1)))
928 {
929 rtx new_src;
930 int old_cost = set_src_cost (SET_SRC (new_set), speed);
931
932 gcc_assert (rtx_equal_p (XEXP (SET_SRC (new_set), 0), reg));
933 new_src = simplify_replace_rtx (SET_SRC (new_set), reg, src);
934
935 if (set_src_cost (new_src, speed) <= old_cost
936 && validate_change (use_insn, &SET_SRC (new_set),
937 new_src, 0))
938 return true;
939 }
940 }
941 return false;
942 }
943
944 /* Called by reload_combine when scanning INSN. This function tries to detect
945 patterns where a constant is added to a register, and the result is used
946 in an address.
947 Return true if no further processing is needed on INSN; false if it wasn't
948 recognized and should be handled normally. */
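/* For illustration only (hypothetical RTL):

       (set (reg:SI 1) (plus:SI (reg:SI 2) (const_int 8)))
       ...
       (set (reg:SI 3) (mem:SI (reg:SI 1)))

   can, when profitable, be turned into

       (set (reg:SI 3) (mem:SI (plus:SI (reg:SI 2) (const_int 8))))

   leaving the add insn to be moved past the rewritten uses or deleted
   once it becomes redundant.  */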
949
950 static bool
951 reload_combine_recognize_const_pattern (rtx_insn *insn)
952 {
953 int from_ruid = reload_combine_ruid;
954 rtx set, pat, reg, src, addreg;
955 unsigned int regno;
956 struct reg_use *use;
957 bool must_move_add;
958 rtx_insn *add_moved_after_insn = NULL;
959 int add_moved_after_ruid = 0;
960 int clobbered_regno = -1;
961
962 set = single_set (insn);
963 if (set == NULL_RTX)
964 return false;
965
966 reg = SET_DEST (set);
967 src = SET_SRC (set);
968 if (!REG_P (reg)
969 || hard_regno_nregs[REGNO (reg)][GET_MODE (reg)] != 1
970 || GET_MODE (reg) != Pmode
971 || reg == stack_pointer_rtx)
972 return false;
973
974 regno = REGNO (reg);
975
976 /* We look for a REG1 = REG2 + CONSTANT insn, followed by either
977 uses of REG1 inside an address, or inside another add insn. If
978 possible and profitable, merge the addition into subsequent
979 uses. */
980 if (GET_CODE (src) != PLUS
981 || !REG_P (XEXP (src, 0))
982 || !CONSTANT_P (XEXP (src, 1)))
983 return false;
984
985 addreg = XEXP (src, 0);
986 must_move_add = rtx_equal_p (reg, addreg);
987
988 pat = PATTERN (insn);
989 if (must_move_add && set != pat)
990 {
991 /* We have to be careful when moving the add; apart from the
992 single_set there may also be clobbers. Recognize one special
993 case, that of one clobber alongside the set (likely a clobber
994 of the CC register). */
995 gcc_assert (GET_CODE (PATTERN (insn)) == PARALLEL);
996 if (XVECLEN (pat, 0) != 2 || XVECEXP (pat, 0, 0) != set
997 || GET_CODE (XVECEXP (pat, 0, 1)) != CLOBBER
998 || !REG_P (XEXP (XVECEXP (pat, 0, 1), 0)))
999 return false;
1000 clobbered_regno = REGNO (XEXP (XVECEXP (pat, 0, 1), 0));
1001 }
1002
1003 do
1004 {
1005 use = reload_combine_closest_single_use (regno, from_ruid);
1006
1007 if (use)
1008 /* Start the search for the next use from here. */
1009 from_ruid = use->ruid;
1010
1011 if (use && GET_MODE (*use->usep) == Pmode)
1012 {
1013 bool delete_add = false;
1014 rtx_insn *use_insn = use->insn;
1015 int use_ruid = use->ruid;
1016
1017 /* Avoid moving the add insn past a jump. */
1018 if (must_move_add && use_ruid <= last_jump_ruid)
1019 break;
1020
1021 /* If the add clobbers another hard reg in parallel, don't move
1022 it past a real set of this hard reg. */
1023 if (must_move_add && clobbered_regno >= 0
1024 && reg_state[clobbered_regno].real_store_ruid >= use_ruid)
1025 break;
1026
1027 #ifdef HAVE_cc0
1028 /* Do not separate cc0 setter and cc0 user on HAVE_cc0 targets. */
1029 if (must_move_add && sets_cc0_p (PATTERN (use_insn)))
1030 break;
1031 #endif
1032
1033 gcc_assert (reg_state[regno].store_ruid <= use_ruid);
1034 /* Avoid moving a use of ADDREG past a point where it is stored. */
1035 if (reg_state[REGNO (addreg)].store_ruid > use_ruid)
1036 break;
1037
1038 /* We also must not move the addition past an insn that sets
1039 the same register, unless we can combine two add insns. */
1040 if (must_move_add && reg_state[regno].store_ruid == use_ruid)
1041 {
1042 if (use->containing_mem == NULL_RTX)
1043 delete_add = true;
1044 else
1045 break;
1046 }
1047
1048 if (try_replace_in_use (use, reg, src))
1049 {
1050 reload_combine_purge_insn_uses (use_insn);
1051 reload_combine_note_use (&PATTERN (use_insn), use_insn,
1052 use_ruid, NULL_RTX);
1053
1054 if (delete_add)
1055 {
1056 fixup_debug_insns (reg, src, insn, use_insn);
1057 delete_insn (insn);
1058 return true;
1059 }
1060 if (must_move_add)
1061 {
1062 add_moved_after_insn = use_insn;
1063 add_moved_after_ruid = use_ruid;
1064 }
1065 continue;
1066 }
1067 }
1068 /* If we get here, we couldn't handle this use. */
1069 if (must_move_add)
1070 break;
1071 }
1072 while (use);
1073
1074 if (!must_move_add || add_moved_after_insn == NULL_RTX)
1075 /* Process the add normally. */
1076 return false;
1077
1078 fixup_debug_insns (reg, src, insn, add_moved_after_insn);
1079
1080 reorder_insns (insn, insn, add_moved_after_insn);
1081 reload_combine_purge_reg_uses_after_ruid (regno, add_moved_after_ruid);
1082 reload_combine_split_ruids (add_moved_after_ruid - 1);
1083 reload_combine_note_use (&PATTERN (insn), insn,
1084 add_moved_after_ruid, NULL_RTX);
1085 reg_state[regno].store_ruid = add_moved_after_ruid;
1086
1087 return true;
1088 }
1089
1090 /* Called by reload_combine when scanning INSN. Try to detect a pattern we
1091 can handle and improve. Return true if no further processing is needed on
1092 INSN; false if it wasn't recognized and should be handled normally. */
1093
1094 static bool
1095 reload_combine_recognize_pattern (rtx_insn *insn)
1096 {
1097 rtx set, reg, src;
1098 unsigned int regno;
1099
1100 set = single_set (insn);
1101 if (set == NULL_RTX)
1102 return false;
1103
1104 reg = SET_DEST (set);
1105 src = SET_SRC (set);
1106 if (!REG_P (reg)
1107 || hard_regno_nregs[REGNO (reg)][GET_MODE (reg)] != 1)
1108 return false;
1109
1110 regno = REGNO (reg);
1111
1112 /* Look for (set (REGX) (CONST_INT))
1113 (set (REGX) (PLUS (REGX) (REGY)))
1114 ...
1115 ... (MEM (REGX)) ...
1116 and convert it to
1117 (set (REGZ) (CONST_INT))
1118 ...
1119 ... (MEM (PLUS (REGZ) (REGY)))... .
1120
1121 First, check that we have (set (REGX) (PLUS (REGX) (REGY)))
1122 and that we know all uses of REGX before it dies.
1123 Also, explicitly check that REGX != REGY; our life information
1124 does not yet show whether REGY changes in this insn. */
1125
1126 if (GET_CODE (src) == PLUS
1127 && reg_state[regno].all_offsets_match
1128 && last_index_reg != -1
1129 && REG_P (XEXP (src, 1))
1130 && rtx_equal_p (XEXP (src, 0), reg)
1131 && !rtx_equal_p (XEXP (src, 1), reg)
1132 && reg_state[regno].use_index >= 0
1133 && reg_state[regno].use_index < RELOAD_COMBINE_MAX_USES
1134 && last_label_ruid < reg_state[regno].use_ruid)
1135 {
1136 rtx base = XEXP (src, 1);
1137 rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
1138 rtx prev_set = prev ? single_set (prev) : NULL_RTX;
1139 rtx index_reg = NULL_RTX;
1140 rtx reg_sum = NULL_RTX;
1141 int i;
1142
1143 /* Now we need to set INDEX_REG to an index register (denoted as
1144 REGZ in the illustration above) and REG_SUM to the expression
1145 register+register that we want to use to substitute uses of REG
1146 (typically in MEMs) with. First check REG and BASE for being
1147 index registers; we can use them even if they are not dead. */
1148 if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], regno)
1149 || TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS],
1150 REGNO (base)))
1151 {
1152 index_reg = reg;
1153 reg_sum = src;
1154 }
1155 else
1156 {
1157 /* Otherwise, look for a free index register. Since we have
1158 checked above that neither REG nor BASE are index registers,
1159 if we find anything at all, it will be different from these
1160 two registers. */
1161 for (i = first_index_reg; i <= last_index_reg; i++)
1162 {
1163 if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], i)
1164 && reg_state[i].use_index == RELOAD_COMBINE_MAX_USES
1165 && reg_state[i].store_ruid <= reg_state[regno].use_ruid
1166 && (call_used_regs[i] || df_regs_ever_live_p (i))
1167 && (!frame_pointer_needed || i != HARD_FRAME_POINTER_REGNUM)
1168 && !fixed_regs[i] && !global_regs[i]
1169 && hard_regno_nregs[i][GET_MODE (reg)] == 1
1170 && targetm.hard_regno_scratch_ok (i))
1171 {
1172 index_reg = gen_rtx_REG (GET_MODE (reg), i);
1173 reg_sum = gen_rtx_PLUS (GET_MODE (reg), index_reg, base);
1174 break;
1175 }
1176 }
1177 }
1178
1179 /* Check that PREV_SET is indeed (set (REGX) (CONST_INT)) and that
1180 (REGY), i.e. BASE, is not clobbered before the last use we'll
1181 create. */
1182 if (reg_sum
1183 && prev_set
1184 && CONST_INT_P (SET_SRC (prev_set))
1185 && rtx_equal_p (SET_DEST (prev_set), reg)
1186 && (reg_state[REGNO (base)].store_ruid
1187 <= reg_state[regno].use_ruid))
1188 {
1189 /* Change destination register and, if necessary, the constant
1190 value in PREV, the constant loading instruction. */
1191 validate_change (prev, &SET_DEST (prev_set), index_reg, 1);
1192 if (reg_state[regno].offset != const0_rtx)
1193 validate_change (prev,
1194 &SET_SRC (prev_set),
1195 GEN_INT (INTVAL (SET_SRC (prev_set))
1196 + INTVAL (reg_state[regno].offset)),
1197 1);
1198
1199 /* Now for every use of REG that we have recorded, replace REG
1200 with REG_SUM. */
1201 for (i = reg_state[regno].use_index;
1202 i < RELOAD_COMBINE_MAX_USES; i++)
1203 validate_unshare_change (reg_state[regno].reg_use[i].insn,
1204 reg_state[regno].reg_use[i].usep,
1205 /* Each change must have its own
1206 replacement. */
1207 reg_sum, 1);
1208
1209 if (apply_change_group ())
1210 {
1211 struct reg_use *lowest_ruid = NULL;
1212
1213 /* For every new use of REG_SUM, we have to record the use
1214 of BASE therein, i.e. operand 1. */
1215 for (i = reg_state[regno].use_index;
1216 i < RELOAD_COMBINE_MAX_USES; i++)
1217 {
1218 struct reg_use *use = reg_state[regno].reg_use + i;
1219 reload_combine_note_use (&XEXP (*use->usep, 1), use->insn,
1220 use->ruid, use->containing_mem);
1221 if (lowest_ruid == NULL || use->ruid < lowest_ruid->ruid)
1222 lowest_ruid = use;
1223 }
1224
1225 fixup_debug_insns (reg, reg_sum, insn, lowest_ruid->insn);
1226
1227 /* Delete the reg-reg addition. */
1228 delete_insn (insn);
1229
1230 if (reg_state[regno].offset != const0_rtx)
1231 /* Previous REG_EQUIV / REG_EQUAL notes for PREV
1232 are now invalid. */
1233 remove_reg_equal_equiv_notes (prev);
1234
1235 reg_state[regno].use_index = RELOAD_COMBINE_MAX_USES;
1236 return true;
1237 }
1238 }
1239 }
1240 return false;
1241 }
1242
1243 static void
1244 reload_combine (void)
1245 {
1246 rtx_insn *insn, *prev;
1247 basic_block bb;
1248 unsigned int r;
1249 int min_labelno, n_labels;
1250 HARD_REG_SET ever_live_at_start, *label_live;
1251
1252 /* To avoid wasting too much time later searching for an index register,
1253 determine the minimum and maximum index register numbers. */
1254 if (INDEX_REG_CLASS == NO_REGS)
1255 last_index_reg = -1;
1256 else if (first_index_reg == -1 && last_index_reg == 0)
1257 {
1258 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
1259 if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], r))
1260 {
1261 if (first_index_reg == -1)
1262 first_index_reg = r;
1263
1264 last_index_reg = r;
1265 }
1266
1267 /* If no index register is available, we can quit now. Set LAST_INDEX_REG
1268 to -1 so we'll know to quit early the next time we get here. */
1269 if (first_index_reg == -1)
1270 {
1271 last_index_reg = -1;
1272 return;
1273 }
1274 }
1275
1276 /* Set up LABEL_LIVE and EVER_LIVE_AT_START. The register lifetime
1277 information is a bit fuzzy immediately after reload, but it's
1278 still good enough to determine which registers are live at a jump
1279 destination. */
1280 min_labelno = get_first_label_num ();
1281 n_labels = max_label_num () - min_labelno;
1282 label_live = XNEWVEC (HARD_REG_SET, n_labels);
1283 CLEAR_HARD_REG_SET (ever_live_at_start);
1284
1285 FOR_EACH_BB_REVERSE_FN (bb, cfun)
1286 {
1287 insn = BB_HEAD (bb);
1288 if (LABEL_P (insn))
1289 {
1290 HARD_REG_SET live;
1291 bitmap live_in = df_get_live_in (bb);
1292
1293 REG_SET_TO_HARD_REG_SET (live, live_in);
1294 compute_use_by_pseudos (&live, live_in);
1295 COPY_HARD_REG_SET (LABEL_LIVE (insn), live);
1296 IOR_HARD_REG_SET (ever_live_at_start, live);
1297 }
1298 }
1299
1300 /* Initialize last_label_ruid, reload_combine_ruid and reg_state. */
1301 last_label_ruid = last_jump_ruid = reload_combine_ruid = 0;
1302 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
1303 {
1304 reg_state[r].store_ruid = 0;
1305 reg_state[r].real_store_ruid = 0;
1306 if (fixed_regs[r])
1307 reg_state[r].use_index = -1;
1308 else
1309 reg_state[r].use_index = RELOAD_COMBINE_MAX_USES;
1310 }
1311
1312 for (insn = get_last_insn (); insn; insn = prev)
1313 {
1314 bool control_flow_insn;
1315 rtx note;
1316
1317 prev = PREV_INSN (insn);
1318
1319 /* We cannot do our optimization across labels. Invalidating all the use
1320 information we have would be costly, so we just note where the label
1321 is and then later disable any optimization that would cross it. */
1322 if (LABEL_P (insn))
1323 last_label_ruid = reload_combine_ruid;
1324 else if (BARRIER_P (insn))
1325 {
1326 /* Crossing a barrier resets all the use information. */
1327 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
1328 if (! fixed_regs[r])
1329 reg_state[r].use_index = RELOAD_COMBINE_MAX_USES;
1330 }
1331 else if (INSN_P (insn) && volatile_insn_p (PATTERN (insn)))
1332 /* Optimizations across insns being marked as volatile must be
1333 prevented. All the usage information is invalidated
1334 here. */
1335 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
1336 if (! fixed_regs[r]
1337 && reg_state[r].use_index != RELOAD_COMBINE_MAX_USES)
1338 reg_state[r].use_index = -1;
1339
1340 if (! NONDEBUG_INSN_P (insn))
1341 continue;
1342
1343 reload_combine_ruid++;
1344
1345 control_flow_insn = control_flow_insn_p (insn);
1346 if (control_flow_insn)
1347 last_jump_ruid = reload_combine_ruid;
1348
1349 if (reload_combine_recognize_const_pattern (insn)
1350 || reload_combine_recognize_pattern (insn))
1351 continue;
1352
1353 note_stores (PATTERN (insn), reload_combine_note_store, NULL);
1354
1355 if (CALL_P (insn))
1356 {
1357 rtx link;
1358
1359 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
1360 if (call_used_regs[r])
1361 {
1362 reg_state[r].use_index = RELOAD_COMBINE_MAX_USES;
1363 reg_state[r].store_ruid = reload_combine_ruid;
1364 }
1365
1366 for (link = CALL_INSN_FUNCTION_USAGE (insn); link;
1367 link = XEXP (link, 1))
1368 {
1369 rtx setuse = XEXP (link, 0);
1370 rtx usage_rtx = XEXP (setuse, 0);
1371 if ((GET_CODE (setuse) == USE || GET_CODE (setuse) == CLOBBER)
1372 && REG_P (usage_rtx))
1373 {
1374 unsigned int i;
1375 unsigned int start_reg = REGNO (usage_rtx);
1376 unsigned int num_regs
1377 = hard_regno_nregs[start_reg][GET_MODE (usage_rtx)];
1378 unsigned int end_reg = start_reg + num_regs - 1;
1379 for (i = start_reg; i <= end_reg; i++)
1380 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1381 {
1382 reg_state[i].use_index = RELOAD_COMBINE_MAX_USES;
1383 reg_state[i].store_ruid = reload_combine_ruid;
1384 }
1385 else
1386 reg_state[i].use_index = -1;
1387 }
1388 }
1389 }
1390
1391 if (control_flow_insn && !ANY_RETURN_P (PATTERN (insn)))
1392 {
1393 /* Non-spill registers might be used at the call destination in
1394 some unknown fashion, so we have to mark the unknown use. */
1395 HARD_REG_SET *live;
1396
1397 if ((condjump_p (insn) || condjump_in_parallel_p (insn))
1398 && JUMP_LABEL (insn))
1399 {
1400 if (ANY_RETURN_P (JUMP_LABEL (insn)))
1401 live = NULL;
1402 else
1403 live = &LABEL_LIVE (JUMP_LABEL (insn));
1404 }
1405 else
1406 live = &ever_live_at_start;
1407
1408 if (live)
1409 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
1410 if (TEST_HARD_REG_BIT (*live, r))
1411 reg_state[r].use_index = -1;
1412 }
1413
1414 reload_combine_note_use (&PATTERN (insn), insn, reload_combine_ruid,
1415 NULL_RTX);
1416
1417 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1418 {
1419 if (REG_NOTE_KIND (note) == REG_INC && REG_P (XEXP (note, 0)))
1420 {
1421 int regno = REGNO (XEXP (note, 0));
1422 reg_state[regno].store_ruid = reload_combine_ruid;
1423 reg_state[regno].real_store_ruid = reload_combine_ruid;
1424 reg_state[regno].use_index = -1;
1425 }
1426 }
1427 }
1428
1429 free (label_live);
1430 }
1431
1432 /* Check if DST is a register or a subreg of a register; if it is,
1433 update store_ruid, real_store_ruid and use_index in the reg_state
1434 structure accordingly. Called via note_stores from reload_combine. */
1435
1436 static void
1437 reload_combine_note_store (rtx dst, const_rtx set, void *data ATTRIBUTE_UNUSED)
1438 {
1439 int regno = 0;
1440 int i;
1441 machine_mode mode = GET_MODE (dst);
1442
1443 if (GET_CODE (dst) == SUBREG)
1444 {
1445 regno = subreg_regno_offset (REGNO (SUBREG_REG (dst)),
1446 GET_MODE (SUBREG_REG (dst)),
1447 SUBREG_BYTE (dst),
1448 GET_MODE (dst));
1449 dst = SUBREG_REG (dst);
1450 }
1451
1452 /* Some targets do argument pushes without adding REG_INC notes. */
1453
1454 if (MEM_P (dst))
1455 {
1456 dst = XEXP (dst, 0);
1457 if (GET_CODE (dst) == PRE_INC || GET_CODE (dst) == POST_INC
1458 || GET_CODE (dst) == PRE_DEC || GET_CODE (dst) == POST_DEC
1459 || GET_CODE (dst) == PRE_MODIFY || GET_CODE (dst) == POST_MODIFY)
1460 {
1461 regno = REGNO (XEXP (dst, 0));
1462 mode = GET_MODE (XEXP (dst, 0));
1463 for (i = hard_regno_nregs[regno][mode] - 1 + regno; i >= regno; i--)
1464 {
1465 /* We could probably do better, but for now mark the register
1466 as used in an unknown fashion and set/clobbered at this
1467 insn. */
1468 reg_state[i].use_index = -1;
1469 reg_state[i].store_ruid = reload_combine_ruid;
1470 reg_state[i].real_store_ruid = reload_combine_ruid;
1471 }
1472 }
1473 else
1474 return;
1475 }
1476
1477 if (!REG_P (dst))
1478 return;
1479 regno += REGNO (dst);
1480
1481 /* note_stores might have stripped a STRICT_LOW_PART, so we have to be
1482 careful with registers / register parts that are not full words.
1483 Similarly for ZERO_EXTRACT. */
1484 if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT
1485 || GET_CODE (SET_DEST (set)) == STRICT_LOW_PART)
1486 {
1487 for (i = hard_regno_nregs[regno][mode] - 1 + regno; i >= regno; i--)
1488 {
1489 reg_state[i].use_index = -1;
1490 reg_state[i].store_ruid = reload_combine_ruid;
1491 reg_state[i].real_store_ruid = reload_combine_ruid;
1492 }
1493 }
1494 else
1495 {
1496 for (i = hard_regno_nregs[regno][mode] - 1 + regno; i >= regno; i--)
1497 {
1498 reg_state[i].store_ruid = reload_combine_ruid;
1499 if (GET_CODE (set) == SET)
1500 reg_state[i].real_store_ruid = reload_combine_ruid;
1501 reg_state[i].use_index = RELOAD_COMBINE_MAX_USES;
1502 }
1503 }
1504 }
1505
1506 /* XP points to a piece of rtl that has to be checked for any uses of
1507 registers.
1508 *XP is the pattern of INSN, or a part of it.
1509 Called from reload_combine, and recursively by itself. */
1510 static void
1511 reload_combine_note_use (rtx *xp, rtx_insn *insn, int ruid, rtx containing_mem)
1512 {
1513 rtx x = *xp;
1514   enum rtx_code code = GET_CODE (x);
1515 const char *fmt;
1516 int i, j;
1517 rtx offset = const0_rtx; /* For the REG case below. */
1518
1519 switch (code)
1520 {
1521 case SET:
1522 if (REG_P (SET_DEST (x)))
1523 {
1524 reload_combine_note_use (&SET_SRC (x), insn, ruid, NULL_RTX);
1525 return;
1526 }
1527 break;
1528
1529 case USE:
1530 /* If this is the USE of a return value, we can't change it. */
1531 if (REG_P (XEXP (x, 0)) && REG_FUNCTION_VALUE_P (XEXP (x, 0)))
1532 {
1533 /* Mark the return register as used in an unknown fashion. */
1534 rtx reg = XEXP (x, 0);
1535 int regno = REGNO (reg);
1536 int nregs = hard_regno_nregs[regno][GET_MODE (reg)];
1537
1538 while (--nregs >= 0)
1539 reg_state[regno + nregs].use_index = -1;
1540 return;
1541 }
1542 break;
1543
1544 case CLOBBER:
1545 if (REG_P (SET_DEST (x)))
1546 {
1547 /* No spurious CLOBBERs of pseudo registers may remain. */
1548 gcc_assert (REGNO (SET_DEST (x)) < FIRST_PSEUDO_REGISTER);
1549 return;
1550 }
1551 break;
1552
1553 case PLUS:
1554 /* We are interested in (plus (reg) (const_int)) . */
1555 if (!REG_P (XEXP (x, 0))
1556 || !CONST_INT_P (XEXP (x, 1)))
1557 break;
1558 offset = XEXP (x, 1);
1559 x = XEXP (x, 0);
1560 /* Fall through. */
1561 case REG:
1562 {
1563 int regno = REGNO (x);
1564 int use_index;
1565 int nregs;
1566
1567 /* No spurious USEs of pseudo registers may remain. */
1568 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1569
1570 nregs = hard_regno_nregs[regno][GET_MODE (x)];
1571
1572 /* We can't substitute into multi-hard-reg uses. */
1573 if (nregs > 1)
1574 {
1575 while (--nregs >= 0)
1576 reg_state[regno + nregs].use_index = -1;
1577 return;
1578 }
1579
1580 /* We may be called to update uses in previously seen insns.
1581 Don't add uses beyond the last store we saw. */
1582 if (ruid < reg_state[regno].store_ruid)
1583 return;
1584
1585 /* If this register is already used in some unknown fashion, we
1586 can't do anything.
1587 If we decrement the index from zero to -1, we can't store more
1588 uses, so this register becomes used in an unknown fashion. */
1589 use_index = --reg_state[regno].use_index;
1590 if (use_index < 0)
1591 return;
1592
1593 if (use_index == RELOAD_COMBINE_MAX_USES - 1)
1594 {
1595 /* This is the first use of this register we have seen since we
1596 marked it as dead. */
1597 reg_state[regno].offset = offset;
1598 reg_state[regno].all_offsets_match = true;
1599 reg_state[regno].use_ruid = ruid;
1600 }
1601 else
1602 {
1603 if (reg_state[regno].use_ruid > ruid)
1604 reg_state[regno].use_ruid = ruid;
1605
1606 if (! rtx_equal_p (offset, reg_state[regno].offset))
1607 reg_state[regno].all_offsets_match = false;
1608 }
1609
1610 reg_state[regno].reg_use[use_index].insn = insn;
1611 reg_state[regno].reg_use[use_index].ruid = ruid;
1612 reg_state[regno].reg_use[use_index].containing_mem = containing_mem;
1613 reg_state[regno].reg_use[use_index].usep = xp;
1614 return;
1615 }
1616
1617 case MEM:
1618 containing_mem = x;
1619 break;
1620
1621 default:
1622 break;
1623 }
1624
1625 /* Recursively process the components of X. */
1626 fmt = GET_RTX_FORMAT (code);
1627 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1628 {
1629 if (fmt[i] == 'e')
1630 reload_combine_note_use (&XEXP (x, i), insn, ruid, containing_mem);
1631 else if (fmt[i] == 'E')
1632 {
1633 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1634 reload_combine_note_use (&XVECEXP (x, i, j), insn, ruid,
1635 containing_mem);
1636 }
1637 }
1638 }
1639 \f
1640 /* See if we can reduce the cost of a constant by replacing a move
1641 with an add. We track situations in which a register is set to a
1642 constant or to a register plus a constant. */
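/* For illustration only (hypothetical RTL): if (reg:SI 1) is already
   known to hold the constant 100, a subsequent

       (set (reg:SI 1) (const_int 104))

   can be rewritten as the usually cheaper

       (set (reg:SI 1) (plus:SI (reg:SI 1) (const_int 4))).  */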
1643 /* We cannot do our optimization across labels. Invalidating all the
1644 information about register contents we have would be costly, so we
1645 use move2add_last_label_luid to note where the label is and then
1646 later disable any optimization that would cross it.
1647 reg_offset[n] / reg_base_reg[n] / reg_symbol_ref[n] / reg_mode[n]
1648 are only valid if reg_set_luid[n] is greater than
1649 move2add_last_label_luid.
1650 For a set that established a new (potential) base register with
1651 non-constant value, we use move2add_luid from the place where the
1652 setting insn is encountered; registers based off that base then
1653 get the same reg_set_luid. Constants all get
1654 move2add_last_label_luid + 1 as their reg_set_luid. */
1655 static int reg_set_luid[FIRST_PSEUDO_REGISTER];
1656
1657 /* If reg_base_reg[n] is negative, register n has been set to
1658 reg_offset[n] or reg_symbol_ref[n] + reg_offset[n] in mode reg_mode[n].
1659 If reg_base_reg[n] is non-negative, register n has been set to the
1660 sum of reg_offset[n] and the value of register reg_base_reg[n]
1661 before reg_set_luid[n], calculated in mode reg_mode[n] .
1662 For multi-hard-register registers, all but the first one are
1663 recorded as BLKmode in reg_mode. Setting reg_mode to VOIDmode
1664 marks it as invalid. */
1665 static HOST_WIDE_INT reg_offset[FIRST_PSEUDO_REGISTER];
1666 static int reg_base_reg[FIRST_PSEUDO_REGISTER];
1667 static rtx reg_symbol_ref[FIRST_PSEUDO_REGISTER];
1668 static machine_mode reg_mode[FIRST_PSEUDO_REGISTER];
1669
1670 /* move2add_luid is linearly increased while scanning the instructions
1671 from first to last. It is used to set reg_set_luid in
1672 reload_cse_move2add and move2add_note_store. */
1673 static int move2add_luid;
1674
1675 /* move2add_last_label_luid is set whenever a label is found. Labels
1676 invalidate all previously collected reg_offset data. */
1677 static int move2add_last_label_luid;
1678
1679 /* ??? We don't know how zero / sign extension is handled, hence we
1680 can't go from a narrower to a wider mode. */
1681 #define MODES_OK_FOR_MOVE2ADD(OUTMODE, INMODE) \
1682 (GET_MODE_SIZE (OUTMODE) == GET_MODE_SIZE (INMODE) \
1683 || (GET_MODE_SIZE (OUTMODE) <= GET_MODE_SIZE (INMODE) \
1684 && TRULY_NOOP_TRUNCATION_MODES_P (OUTMODE, INMODE)))
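/* For illustration only: on a target where truncation is a no-op, a
   value recorded in SImode can also be used in the narrower QImode,
   but a value recorded in QImode tells us nothing about the wider
   SImode bits, so the widening direction is rejected.  */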
1685
1686 /* Record that REG is being set to a value with the mode of REG. */
1687
1688 static void
1689 move2add_record_mode (rtx reg)
1690 {
1691 int regno, nregs;
1692 machine_mode mode = GET_MODE (reg);
1693
1694 if (GET_CODE (reg) == SUBREG)
1695 {
1696 regno = subreg_regno (reg);
1697 nregs = subreg_nregs (reg);
1698 }
1699 else if (REG_P (reg))
1700 {
1701 regno = REGNO (reg);
1702 nregs = hard_regno_nregs[regno][mode];
1703 }
1704 else
1705 gcc_unreachable ();
1706 for (int i = nregs - 1; i > 0; i--)
1707 reg_mode[regno + i] = BLKmode;
1708 reg_mode[regno] = mode;
1709 }
1710
1711 /* Record that REG is being set to the sum of SYM and OFF. */
1712
1713 static void
1714 move2add_record_sym_value (rtx reg, rtx sym, rtx off)
1715 {
1716 int regno = REGNO (reg);
1717
1718 move2add_record_mode (reg);
1719 reg_set_luid[regno] = move2add_luid;
1720 reg_base_reg[regno] = -1;
1721 reg_symbol_ref[regno] = sym;
1722 reg_offset[regno] = INTVAL (off);
1723 }
1724
1725 /* Check if REGNO contains a valid value in MODE. */
1726
1727 static bool
1728 move2add_valid_value_p (int regno, machine_mode mode)
1729 {
1730 if (reg_set_luid[regno] <= move2add_last_label_luid)
1731 return false;
1732
1733 if (mode != reg_mode[regno])
1734 {
1735 if (!MODES_OK_FOR_MOVE2ADD (mode, reg_mode[regno]))
1736 return false;
1737 /* The value loaded into regno in reg_mode[regno] is also valid in
1738 mode after truncation only if (REG:mode regno) is the lowpart of
1739 (REG:reg_mode[regno] regno). Now, for big endian, the starting
1740 regno of the lowpart might be different. */
1741 int s_off = subreg_lowpart_offset (mode, reg_mode[regno]);
1742 s_off = subreg_regno_offset (regno, reg_mode[regno], s_off, mode);
1743 if (s_off != 0)
1744 /* We could in principle adjust regno, check reg_mode[regno] to be
1745 BLKmode, and return s_off to the caller (vs. -1 for failure),
1746 but we currently have no callers that could make use of this
1747 information. */
1748 return false;
1749 }
1750
1751 for (int i = hard_regno_nregs[regno][mode] - 1; i > 0; i--)
1752 if (reg_mode[regno + i] != BLKmode)
1753 return false;
1754 return true;
1755 }
1756
1757 /* This function is called with INSN that sets REG to (SYM + OFF),
1758 while REG is known to already have value (SYM + offset).
1759 This function tries to change INSN into an add instruction
1760 (set (REG) (plus (REG) (OFF - offset))) using the known value.
1761 It also updates the information about REG's known value.
1762 Return true if we made a change. */
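/* For illustration only (hypothetical): if REG is known to hold
   (SYM + 4) and INSN is (set (reg) (SYM + 12)), the insn can become
   (set (reg) (plus (reg) (const_int 8))) when the cost comparison
   favors the add.  */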
1763
1764 static bool
1765 move2add_use_add2_insn (rtx reg, rtx sym, rtx off, rtx_insn *insn)
1766 {
1767 rtx pat = PATTERN (insn);
1768 rtx src = SET_SRC (pat);
1769 int regno = REGNO (reg);
1770 rtx new_src = gen_int_mode (UINTVAL (off) - reg_offset[regno],
1771 GET_MODE (reg));
1772 bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
1773 bool changed = false;
1774
1775 /* (set (reg) (plus (reg) (const_int 0))) is not canonical;
1776 use (set (reg) (reg)) instead.
1777 We don't delete this insn, nor do we convert it into a
1778 note, to avoid losing register notes or the return
1779 value flag. jump2 already knows how to get rid of
1780 no-op moves. */
1781 if (new_src == const0_rtx)
1782 {
1783 /* If the constants are different, this is a
1784 truncation, that, if turned into (set (reg)
1785 (reg)), would be discarded. Maybe we should
1786 try a truncMN pattern? */
1787 if (INTVAL (off) == reg_offset [regno])
1788 changed = validate_change (insn, &SET_SRC (pat), reg, 0);
1789 }
1790 else
1791 {
1792 struct full_rtx_costs oldcst, newcst;
1793 rtx tem = gen_rtx_PLUS (GET_MODE (reg), reg, new_src);
1794
1795 get_full_set_rtx_cost (pat, &oldcst);
1796 SET_SRC (pat) = tem;
1797 get_full_set_rtx_cost (pat, &newcst);
1798 SET_SRC (pat) = src;
1799
1800 if (costs_lt_p (&newcst, &oldcst, speed)
1801 && have_add2_insn (reg, new_src))
1802 changed = validate_change (insn, &SET_SRC (pat), tem, 0);
1803 else if (sym == NULL_RTX && GET_MODE (reg) != BImode)
1804 {
1805 machine_mode narrow_mode;
1806 for (narrow_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
1807 narrow_mode != VOIDmode
1808 && narrow_mode != GET_MODE (reg);
1809 narrow_mode = GET_MODE_WIDER_MODE (narrow_mode))
1810 {
1811 if (have_insn_for (STRICT_LOW_PART, narrow_mode)
1812 && ((reg_offset[regno] & ~GET_MODE_MASK (narrow_mode))
1813 == (INTVAL (off) & ~GET_MODE_MASK (narrow_mode))))
1814 {
1815 rtx narrow_reg = gen_lowpart_common (narrow_mode, reg);
1816 rtx narrow_src = gen_int_mode (INTVAL (off),
1817 narrow_mode);
1818 rtx new_set
1819 = gen_rtx_SET (VOIDmode,
1820 gen_rtx_STRICT_LOW_PART (VOIDmode,
1821 narrow_reg),
1822 narrow_src);
1823 get_full_set_rtx_cost (new_set, &newcst);
1824 if (costs_lt_p (&newcst, &oldcst, speed))
1825 {
1826 changed = validate_change (insn, &PATTERN (insn),
1827 new_set, 0);
1828 if (changed)
1829 break;
1830 }
1831 }
1832 }
1833 }
1834 }
1835 move2add_record_sym_value (reg, sym, off);
1836 return changed;
1837 }
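
/* A worked example of the narrow-mode fallback above, with invented
   values: if REG is known to hold 0x12345600 and INSN sets it to
   0x12345678, the two constants agree outside QImode's mask, so the
   pass tries
     (set (strict_low_part <QImode lowpart of REG>) (const_int 0x78))
   whenever the target has a QImode strict_low_part insn and that set
   is cheaper than loading the full constant.  */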


/* This function is called with INSN that sets REG to (SYM + OFF),
   but REG doesn't have a known value (SYM + offset).  This function
   tries to find another register which is known to already have
   value (SYM + offset) and change INSN into an add instruction
   (set (REG) (plus (the found register) (OFF - offset))) if such
   a register is found.  It also updates the information about
   REG's known value.
   Return true iff we made a change.  */

static bool
move2add_use_add3_insn (rtx reg, rtx sym, rtx off, rtx_insn *insn)
{
  rtx pat = PATTERN (insn);
  rtx src = SET_SRC (pat);
  int regno = REGNO (reg);
  int min_regno = 0;
  bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
  int i;
  bool changed = false;
  struct full_rtx_costs oldcst, newcst, mincst;
  rtx plus_expr;

  init_costs_to_max (&mincst);
  get_full_set_rtx_cost (pat, &oldcst);

  plus_expr = gen_rtx_PLUS (GET_MODE (reg), reg, const0_rtx);
  SET_SRC (pat) = plus_expr;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (move2add_valid_value_p (i, GET_MODE (reg))
        && reg_base_reg[i] < 0
        && reg_symbol_ref[i] != NULL_RTX
        && rtx_equal_p (sym, reg_symbol_ref[i]))
      {
        rtx new_src = gen_int_mode (UINTVAL (off) - reg_offset[i],
                                    GET_MODE (reg));
        /* (set (reg) (plus (reg) (const_int 0))) is not canonical;
           use (set (reg) (reg)) instead.
           We don't delete this insn, nor do we convert it into a
           note, to avoid losing register notes or the return
           value flag.  jump2 already knows how to get rid of
           no-op moves.  */
        if (new_src == const0_rtx)
          {
            init_costs_to_zero (&mincst);
            min_regno = i;
            break;
          }
        else
          {
            XEXP (plus_expr, 1) = new_src;
            get_full_set_rtx_cost (pat, &newcst);

            if (costs_lt_p (&newcst, &mincst, speed))
              {
                mincst = newcst;
                min_regno = i;
              }
          }
      }
  SET_SRC (pat) = src;

  if (costs_lt_p (&mincst, &oldcst, speed))
    {
      rtx tem;

      tem = gen_rtx_REG (GET_MODE (reg), min_regno);
      if (i != min_regno)
        {
          rtx new_src = gen_int_mode (UINTVAL (off) - reg_offset[min_regno],
                                      GET_MODE (reg));
          tem = gen_rtx_PLUS (GET_MODE (reg), tem, new_src);
        }
      if (validate_change (insn, &SET_SRC (pat), tem, 0))
        changed = true;
    }
  reg_set_luid[regno] = move2add_luid;
  move2add_record_sym_value (reg, sym, off);
  return changed;
}
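
/* To make the add3 case concrete (symbol and register numbers
   invented for illustration): if r5 is known to hold
   (const (plus (symbol_ref "x") (const_int 4))) and INSN loads
   (const (plus (symbol_ref "x") (const_int 12))) into r3, the scan
   above finds r5 and rewrites INSN as
   (set (reg r3) (plus (reg r5) (const_int 8))), provided the costs
   favour it.  */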

/* Convert move insns with constant inputs to additions if they are cheaper.
   Return true if any changes were made.  */
static bool
reload_cse_move2add (rtx_insn *first)
{
  int i;
  rtx_insn *insn;
  bool changed = false;

  for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
    {
      reg_set_luid[i] = 0;
      reg_offset[i] = 0;
      reg_base_reg[i] = 0;
      reg_symbol_ref[i] = NULL_RTX;
      reg_mode[i] = VOIDmode;
    }

  move2add_last_label_luid = 0;
  move2add_luid = 2;
  for (insn = first; insn; insn = NEXT_INSN (insn), move2add_luid++)
    {
      rtx pat, note;

      if (LABEL_P (insn))
        {
          move2add_last_label_luid = move2add_luid;
          /* We're going to increment move2add_luid twice after a
             label, so that we can use move2add_last_label_luid + 1 as
             the luid for constants.  */
          move2add_luid++;
          continue;
        }
      if (! INSN_P (insn))
        continue;
      pat = PATTERN (insn);
      /* For simplicity, we only perform this optimization on
         straightforward SETs.  */
      if (GET_CODE (pat) == SET
          && REG_P (SET_DEST (pat)))
        {
          rtx reg = SET_DEST (pat);
          int regno = REGNO (reg);
          rtx src = SET_SRC (pat);

          /* Check if we have valid information on the contents of this
             register in the mode of REG.  */
          if (move2add_valid_value_p (regno, GET_MODE (reg))
              && dbg_cnt (cse2_move2add))
            {
              /* Try to transform (set (REGX) (CONST_INT A))
                                  ...
                                  (set (REGX) (CONST_INT B))
                 to
                                  (set (REGX) (CONST_INT A))
                                  ...
                                  (set (REGX) (plus (REGX) (CONST_INT B-A)))
                 or
                                  (set (REGX) (CONST_INT A))
                                  ...
                                  (set (STRICT_LOW_PART (REGX)) (CONST_INT B))
              */
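              /* E.g. with invented constants: a second load of 0x1004
                 into a register already known to hold 0x1000 becomes
                 (set (REGX) (plus (REGX) (const_int 4))) when the add
                 is cheaper than rematerializing the constant.  */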

              if (CONST_INT_P (src)
                  && reg_base_reg[regno] < 0
                  && reg_symbol_ref[regno] == NULL_RTX)
                {
                  changed |= move2add_use_add2_insn (reg, NULL_RTX, src, insn);
                  continue;
                }

              /* Try to transform (set (REGX) (REGY))
                                  (set (REGX) (PLUS (REGX) (CONST_INT A)))
                                  ...
                                  (set (REGX) (REGY))
                                  (set (REGX) (PLUS (REGX) (CONST_INT B)))
                 to
                                  (set (REGX) (REGY))
                                  (set (REGX) (PLUS (REGX) (CONST_INT A)))
                                  ...
                                  (set (REGX) (plus (REGX) (CONST_INT B-A)))  */
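              /* In other words (A and B purely illustrative): when
                 REGX already holds REGY plus a known constant A, the
                 upcoming copy-and-add pair can be collapsed into a
                 single add of B-A and the copy deleted.  */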
              else if (REG_P (src)
                       && reg_set_luid[regno] == reg_set_luid[REGNO (src)]
                       && reg_base_reg[regno] == reg_base_reg[REGNO (src)]
                       && move2add_valid_value_p (REGNO (src), GET_MODE (reg)))
                {
                  rtx_insn *next = next_nonnote_nondebug_insn (insn);
                  rtx set = NULL_RTX;
                  if (next)
                    set = single_set (next);
                  if (set
                      && SET_DEST (set) == reg
                      && GET_CODE (SET_SRC (set)) == PLUS
                      && XEXP (SET_SRC (set), 0) == reg
                      && CONST_INT_P (XEXP (SET_SRC (set), 1)))
                    {
                      rtx src3 = XEXP (SET_SRC (set), 1);
                      unsigned HOST_WIDE_INT added_offset = UINTVAL (src3);
                      HOST_WIDE_INT base_offset = reg_offset[REGNO (src)];
                      HOST_WIDE_INT regno_offset = reg_offset[regno];
                      rtx new_src
                        = gen_int_mode (added_offset
                                        + base_offset
                                        - regno_offset,
                                        GET_MODE (reg));
                      bool success = false;
                      bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));

                      if (new_src == const0_rtx)
                        /* See above why we create (set (reg) (reg)) here.  */
                        success
                          = validate_change (next, &SET_SRC (set), reg, 0);
                      else
                        {
                          rtx old_src = SET_SRC (set);
                          struct full_rtx_costs oldcst, newcst;
                          rtx tem = gen_rtx_PLUS (GET_MODE (reg), reg, new_src);

                          get_full_set_rtx_cost (set, &oldcst);
                          SET_SRC (set) = tem;
                          get_full_set_src_cost (tem, &newcst);
                          SET_SRC (set) = old_src;
                          costs_add_n_insns (&oldcst, 1);

                          if (costs_lt_p (&newcst, &oldcst, speed)
                              && have_add2_insn (reg, new_src))
                            {
                              rtx newpat = gen_rtx_SET (VOIDmode, reg, tem);
                              success
                                = validate_change (next, &PATTERN (next),
                                                   newpat, 0);
                            }
                        }
                      if (success)
                        delete_insn (insn);
                      changed |= success;
                      insn = next;
                      move2add_record_mode (reg);
                      reg_offset[regno]
                        = trunc_int_for_mode (added_offset + base_offset,
                                              GET_MODE (reg));
                      continue;
                    }
                }
            }

          /* Try to transform
             (set (REGX) (CONST (PLUS (SYMBOL_REF) (CONST_INT A))))
             ...
             (set (REGY) (CONST (PLUS (SYMBOL_REF) (CONST_INT B))))
             to
             (set (REGX) (CONST (PLUS (SYMBOL_REF) (CONST_INT A))))
             ...
             (set (REGY) (CONST (PLUS (REGX) (CONST_INT B-A))))  */
          if ((GET_CODE (src) == SYMBOL_REF
               || (GET_CODE (src) == CONST
                   && GET_CODE (XEXP (src, 0)) == PLUS
                   && GET_CODE (XEXP (XEXP (src, 0), 0)) == SYMBOL_REF
                   && CONST_INT_P (XEXP (XEXP (src, 0), 1))))
              && dbg_cnt (cse2_move2add))
            {
              rtx sym, off;

              if (GET_CODE (src) == SYMBOL_REF)
                {
                  sym = src;
                  off = const0_rtx;
                }
              else
                {
                  sym = XEXP (XEXP (src, 0), 0);
                  off = XEXP (XEXP (src, 0), 1);
                }

              /* If the reg already contains the value which is the sum
                 of sym and some constant value, we can use an add2 insn.  */
              if (move2add_valid_value_p (regno, GET_MODE (reg))
                  && reg_base_reg[regno] < 0
                  && reg_symbol_ref[regno] != NULL_RTX
                  && rtx_equal_p (sym, reg_symbol_ref[regno]))
                changed |= move2add_use_add2_insn (reg, sym, off, insn);

              /* Otherwise, we have to find a register whose value is
                 the sum of sym and some constant value.  */
              else
                changed |= move2add_use_add3_insn (reg, sym, off, insn);

              continue;
            }
        }

      for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
        {
          if (REG_NOTE_KIND (note) == REG_INC
              && REG_P (XEXP (note, 0)))
            {
              /* Reset the information about this register.  */
              int regno = REGNO (XEXP (note, 0));
              if (regno < FIRST_PSEUDO_REGISTER)
                {
                  move2add_record_mode (XEXP (note, 0));
                  reg_mode[regno] = VOIDmode;
                }
            }
        }
      note_stores (PATTERN (insn), move2add_note_store, insn);

      /* If INSN is a conditional branch, we try to extract an
         implicit set out of it.  */
      if (any_condjump_p (insn))
        {
          rtx cnd = fis_get_condition (insn);

          if (cnd != NULL_RTX
              && GET_CODE (cnd) == NE
              && REG_P (XEXP (cnd, 0))
              && !reg_set_p (XEXP (cnd, 0), insn)
              /* The following two checks, which are also in
                 move2add_note_store, are intended to reduce the
                 number of calls to gen_rtx_SET to avoid memory
                 allocation if possible.  */
              && SCALAR_INT_MODE_P (GET_MODE (XEXP (cnd, 0)))
              && hard_regno_nregs[REGNO (XEXP (cnd, 0))][GET_MODE (XEXP (cnd, 0))] == 1
              && CONST_INT_P (XEXP (cnd, 1)))
            {
              rtx implicit_set
                = gen_rtx_SET (VOIDmode, XEXP (cnd, 0), XEXP (cnd, 1));
              move2add_note_store (SET_DEST (implicit_set), implicit_set, insn);
            }
        }
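      /* Concretely (an invented example): after a branch such as
           (set (pc) (if_then_else (ne (reg:SI 1) (const_int 0))
                                   (label_ref L) (pc)))
         the fall-through path is only reached with (reg:SI 1) == 0,
         so it is recorded as if (set (reg:SI 1) (const_int 0)) had
         been executed.  */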

      /* If this is a CALL_INSN, all call-used registers are stored with
         unknown values.  */
      if (CALL_P (insn))
        {
          for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
            {
              if (call_used_regs[i])
                /* Reset the information about this register.  */
                reg_mode[i] = VOIDmode;
            }
        }
    }
  return changed;
}

/* SET is a SET or CLOBBER that sets DST.  DATA is the insn which
   contains SET.
   Update reg_set_luid, reg_offset and reg_base_reg accordingly.
   Called from reload_cse_move2add via note_stores.  */

static void
move2add_note_store (rtx dst, const_rtx set, void *data)
{
  rtx_insn *insn = (rtx_insn *) data;
  unsigned int regno = 0;
  machine_mode mode = GET_MODE (dst);

  /* Some targets do argument pushes without adding REG_INC notes.  */

  if (MEM_P (dst))
    {
      dst = XEXP (dst, 0);
      if (GET_CODE (dst) == PRE_INC || GET_CODE (dst) == POST_INC
          || GET_CODE (dst) == PRE_DEC || GET_CODE (dst) == POST_DEC)
        reg_mode[REGNO (XEXP (dst, 0))] = VOIDmode;
      return;
    }

  if (GET_CODE (dst) == SUBREG)
    regno = subreg_regno (dst);
  else if (REG_P (dst))
    regno = REGNO (dst);
  else
    return;

  if (SCALAR_INT_MODE_P (mode)
      && GET_CODE (set) == SET)
    {
      rtx note, sym = NULL_RTX;
      rtx off;

      note = find_reg_equal_equiv_note (insn);
      if (note && GET_CODE (XEXP (note, 0)) == SYMBOL_REF)
        {
          sym = XEXP (note, 0);
          off = const0_rtx;
        }
      else if (note && GET_CODE (XEXP (note, 0)) == CONST
               && GET_CODE (XEXP (XEXP (note, 0), 0)) == PLUS
               && GET_CODE (XEXP (XEXP (XEXP (note, 0), 0), 0)) == SYMBOL_REF
               && CONST_INT_P (XEXP (XEXP (XEXP (note, 0), 0), 1)))
        {
          sym = XEXP (XEXP (XEXP (note, 0), 0), 0);
          off = XEXP (XEXP (XEXP (note, 0), 0), 1);
        }

      if (sym != NULL_RTX)
        {
          move2add_record_sym_value (dst, sym, off);
          return;
        }
    }

  if (SCALAR_INT_MODE_P (mode)
      && GET_CODE (set) == SET
      && GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
      && GET_CODE (SET_DEST (set)) != STRICT_LOW_PART)
    {
      rtx src = SET_SRC (set);
      rtx base_reg;
      unsigned HOST_WIDE_INT offset;
      int base_regno;

      switch (GET_CODE (src))
        {
        case PLUS:
          if (REG_P (XEXP (src, 0)))
            {
              base_reg = XEXP (src, 0);

              if (CONST_INT_P (XEXP (src, 1)))
                offset = UINTVAL (XEXP (src, 1));
              else if (REG_P (XEXP (src, 1))
                       && move2add_valid_value_p (REGNO (XEXP (src, 1)), mode))
                {
                  if (reg_base_reg[REGNO (XEXP (src, 1))] < 0
                      && reg_symbol_ref[REGNO (XEXP (src, 1))] == NULL_RTX)
                    offset = reg_offset[REGNO (XEXP (src, 1))];
                  /* Maybe the first register is known to be a
                     constant.  */
                  else if (move2add_valid_value_p (REGNO (base_reg), mode)
                           && reg_base_reg[REGNO (base_reg)] < 0
                           && reg_symbol_ref[REGNO (base_reg)] == NULL_RTX)
                    {
                      offset = reg_offset[REGNO (base_reg)];
                      base_reg = XEXP (src, 1);
                    }
                  else
                    goto invalidate;
                }
              else
                goto invalidate;

              break;
            }

          goto invalidate;

        case REG:
          base_reg = src;
          offset = 0;
          break;

        case CONST_INT:
          /* Start tracking the register as a constant.  */
          reg_base_reg[regno] = -1;
          reg_symbol_ref[regno] = NULL_RTX;
          reg_offset[regno] = INTVAL (SET_SRC (set));
          /* We assign the same luid to all registers set to constants.  */
          reg_set_luid[regno] = move2add_last_label_luid + 1;
          move2add_record_mode (dst);
          return;

        default:
          goto invalidate;
        }

      base_regno = REGNO (base_reg);
      /* If information about the base register is not valid, set it
         up as a new base register, pretending its value is known
         starting from the current insn.  */
      if (!move2add_valid_value_p (base_regno, mode))
        {
          reg_base_reg[base_regno] = base_regno;
          reg_symbol_ref[base_regno] = NULL_RTX;
          reg_offset[base_regno] = 0;
          reg_set_luid[base_regno] = move2add_luid;
          gcc_assert (GET_MODE (base_reg) == mode);
          move2add_record_mode (base_reg);
        }

      /* Copy base information from our base register.  */
      reg_set_luid[regno] = reg_set_luid[base_regno];
      reg_base_reg[regno] = reg_base_reg[base_regno];
      reg_symbol_ref[regno] = reg_symbol_ref[base_regno];

      /* Compute the sum of the offsets or constants.  */
      reg_offset[regno]
        = trunc_int_for_mode (offset + reg_offset[base_regno], mode);

      move2add_record_mode (dst);
    }
  else
    {
    invalidate:
      /* Invalidate the contents of the register.  */
      move2add_record_mode (dst);
      reg_mode[regno] = VOIDmode;
    }
}
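
/* A sketch of the tracking above (register numbers invented): after
   (set (reg:SI 3) (plus:SI (reg:SI 2) (const_int 8))), register 3
   records base register 2 with offset 8, which the REGX/REGY
   transformation in reload_cse_move2add can later exploit.  */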

namespace {

const pass_data pass_data_postreload_cse =
{
  RTL_PASS, /* type */
  "postreload", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_RELOAD_CSE_REGS, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_postreload_cse : public rtl_opt_pass
{
public:
  pass_postreload_cse (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_postreload_cse, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0 && reload_completed); }

  virtual unsigned int execute (function *);

}; // class pass_postreload_cse

unsigned int
pass_postreload_cse::execute (function *fun)
{
  if (!dbg_cnt (postreload_cse))
    return 0;

  /* Do a very simple CSE pass over just the hard registers.  */
  reload_cse_regs (get_insns ());

  /* Reload_cse_regs can eliminate potentially-trapping MEMs.
     Remove any EH edges associated with them.  */
  if (fun->can_throw_non_call_exceptions
      && purge_all_dead_edges ())
    cleanup_cfg (0);

  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_postreload_cse (gcc::context *ctxt)
{
  return new pass_postreload_cse (ctxt);
}