1 /* Perform simple optimizations to clean up the result of reload.
2 Copyright (C) 1987-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24
25 #include "machmode.h"
26 #include "hard-reg-set.h"
27 #include "rtl.h"
28 #include "tm_p.h"
29 #include "obstack.h"
30 #include "insn-config.h"
31 #include "flags.h"
32 #include "function.h"
33 #include "expr.h"
34 #include "optabs.h"
35 #include "regs.h"
36 #include "basic-block.h"
37 #include "reload.h"
38 #include "recog.h"
39 #include "cselib.h"
40 #include "diagnostic-core.h"
41 #include "except.h"
42 #include "tree.h"
43 #include "target.h"
44 #include "tree-pass.h"
45 #include "df.h"
46 #include "dbgcnt.h"
47
48 static int reload_cse_noop_set_p (rtx);
49 static bool reload_cse_simplify (rtx, rtx);
50 static void reload_cse_regs_1 (void);
51 static int reload_cse_simplify_set (rtx, rtx);
52 static int reload_cse_simplify_operands (rtx, rtx);
53
54 static void reload_combine (void);
55 static void reload_combine_note_use (rtx *, rtx, int, rtx);
56 static void reload_combine_note_store (rtx, const_rtx, void *);
57
58 static bool reload_cse_move2add (rtx);
59 static void move2add_note_store (rtx, const_rtx, void *);
60
61 /* Run cse- and combine-like post-reload optimization phases.
62 FIRST is the first instruction. */
63
64 static void
65 reload_cse_regs (rtx first ATTRIBUTE_UNUSED)
66 {
67 bool moves_converted;
68 reload_cse_regs_1 ();
69 reload_combine ();
70 moves_converted = reload_cse_move2add (first);
71 if (flag_expensive_optimizations)
72 {
73 if (moves_converted)
74 reload_combine ();
75 reload_cse_regs_1 ();
76 }
77 }
78
79 /* See whether a single set SET is a noop. */
80 static int
81 reload_cse_noop_set_p (rtx set)
82 {
83 if (cselib_reg_set_mode (SET_DEST (set)) != GET_MODE (SET_DEST (set)))
84 return 0;
85
86 return rtx_equal_for_cselib_p (SET_DEST (set), SET_SRC (set));
87 }
88
89 /* Try to simplify INSN. Return true if the CFG may have changed. */
90 static bool
91 reload_cse_simplify (rtx insn, rtx testreg)
92 {
93 rtx body = PATTERN (insn);
94 basic_block insn_bb = BLOCK_FOR_INSN (insn);
95 unsigned insn_bb_succs = EDGE_COUNT (insn_bb->succs);
96
97 if (GET_CODE (body) == SET)
98 {
99 int count = 0;
100
101 /* Simplify even if we may think it is a no-op.
102 We may think a memory load of a value smaller than WORD_SIZE
103 is redundant because we haven't taken into account possible
104 implicit extension. reload_cse_simplify_set() will bring
105 this out, so it's safer to simplify before we delete. */
106 count += reload_cse_simplify_set (body, insn);
107
108 if (!count && reload_cse_noop_set_p (body))
109 {
110 rtx value = SET_DEST (body);
111 if (REG_P (value)
112 && ! REG_FUNCTION_VALUE_P (value))
113 value = 0;
114 if (check_for_inc_dec (insn))
115 delete_insn_and_edges (insn);
116 /* We're done with this insn. */
117 goto done;
118 }
119
120 if (count > 0)
121 apply_change_group ();
122 else
123 reload_cse_simplify_operands (insn, testreg);
124 }
125 else if (GET_CODE (body) == PARALLEL)
126 {
127 int i;
128 int count = 0;
129 rtx value = NULL_RTX;
130
131 /* Registers mentioned in the clobber list for an asm cannot be reused
132 within the body of the asm. Invalidate those registers now so that
133 we don't try to substitute values for them. */
134 if (asm_noperands (body) >= 0)
135 {
136 for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
137 {
138 rtx part = XVECEXP (body, 0, i);
139 if (GET_CODE (part) == CLOBBER && REG_P (XEXP (part, 0)))
140 cselib_invalidate_rtx (XEXP (part, 0));
141 }
142 }
143
144 /* If every action in a PARALLEL is a noop, we can delete
145 the entire PARALLEL. */
146 for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
147 {
148 rtx part = XVECEXP (body, 0, i);
149 if (GET_CODE (part) == SET)
150 {
151 if (! reload_cse_noop_set_p (part))
152 break;
153 if (REG_P (SET_DEST (part))
154 && REG_FUNCTION_VALUE_P (SET_DEST (part)))
155 {
156 if (value)
157 break;
158 value = SET_DEST (part);
159 }
160 }
161 else if (GET_CODE (part) != CLOBBER)
162 break;
163 }
164
165 if (i < 0)
166 {
167 if (check_for_inc_dec (insn))
168 delete_insn_and_edges (insn);
169 /* We're done with this insn. */
170 goto done;
171 }
172
173 /* It's not a no-op, but we can try to simplify it. */
174 for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
175 if (GET_CODE (XVECEXP (body, 0, i)) == SET)
176 count += reload_cse_simplify_set (XVECEXP (body, 0, i), insn);
177
178 if (count > 0)
179 apply_change_group ();
180 else
181 reload_cse_simplify_operands (insn, testreg);
182 }
183
184 done:
185 return (EDGE_COUNT (insn_bb->succs) != insn_bb_succs);
186 }
187
188 /* Do a very simple CSE pass over the hard registers.
189
190 This function detects no-op moves where we happened to assign two
191 different pseudo-registers to the same hard register, and then
192 copied one to the other. Reload will generate a useless
193 instruction copying a register to itself.
194
195 This function also detects cases where we load a value from memory
196 into two different registers, and (if memory is more expensive than
197 registers) changes it to simply copy the first register into the
198 second register.
199
200 Finally, this function scans the operands of each
201 instruction to see whether the value is already available in a
202 hard register. It then replaces the operand with the hard register
203 if possible, much like an optional reload would. */
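/* As an illustrative sketch (not taken from any particular target): if
   reload leaves

       (set (reg:SI 3) (mem:SI (reg:SI 4)))
       ...
       (set (reg:SI 5) (mem:SI (reg:SI 4)))

   and a register-register move is cheaper than the second load, the
   second insn can be rewritten as (set (reg:SI 5) (reg:SI 3)).  */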
204
205 static void
206 reload_cse_regs_1 (void)
207 {
208 bool cfg_changed = false;
209 basic_block bb;
210 rtx insn;
211 rtx testreg = gen_rtx_REG (VOIDmode, -1);
212
213 cselib_init (CSELIB_RECORD_MEMORY);
214 init_alias_analysis ();
215
216 FOR_EACH_BB (bb)
217 FOR_BB_INSNS (bb, insn)
218 {
219 if (INSN_P (insn))
220 cfg_changed |= reload_cse_simplify (insn, testreg);
221
222 cselib_process_insn (insn);
223 }
224
225 /* Clean up. */
226 end_alias_analysis ();
227 cselib_finish ();
228 if (cfg_changed)
229 cleanup_cfg (0);
230 }
231
232 /* Try to simplify a single SET instruction. SET is the set pattern.
233 INSN is the instruction it came from.
234 This function only handles one case: if we set a register to a value
235 which is not a register, we try to find that value in some other register
236 and change the set into a register copy. */
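/* For instance (illustrative): if cselib records that (reg:SI 2) already
   holds the constant 10, then (set (reg:SI 1) (const_int 10)) can become
   (set (reg:SI 1) (reg:SI 2)) whenever the register copy is cheaper than
   rematerializing the constant.  */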
237
238 static int
239 reload_cse_simplify_set (rtx set, rtx insn)
240 {
241 int did_change = 0;
242 int dreg;
243 rtx src;
244 reg_class_t dclass;
245 int old_cost;
246 cselib_val *val;
247 struct elt_loc_list *l;
248 #ifdef LOAD_EXTEND_OP
249 enum rtx_code extend_op = UNKNOWN;
250 #endif
251 bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
252
253 dreg = true_regnum (SET_DEST (set));
254 if (dreg < 0)
255 return 0;
256
257 src = SET_SRC (set);
258 if (side_effects_p (src) || true_regnum (src) >= 0)
259 return 0;
260
261 dclass = REGNO_REG_CLASS (dreg);
262
263 #ifdef LOAD_EXTEND_OP
264 /* When replacing a memory with a register, we need to honor assumptions
265 that combine made with respect to the contents of sign bits. We'll do this by
266 generating an extend instruction instead of a reg->reg copy. Thus
267 the destination must be a register that we can widen. */
268 if (MEM_P (src)
269 && GET_MODE_BITSIZE (GET_MODE (src)) < BITS_PER_WORD
270 && (extend_op = LOAD_EXTEND_OP (GET_MODE (src))) != UNKNOWN
271 && !REG_P (SET_DEST (set)))
272 return 0;
273 #endif
274
275 val = cselib_lookup (src, GET_MODE (SET_DEST (set)), 0, VOIDmode);
276 if (! val)
277 return 0;
278
279 /* If memory loads are cheaper than register copies, don't change them. */
280 if (MEM_P (src))
281 old_cost = memory_move_cost (GET_MODE (src), dclass, true);
282 else if (REG_P (src))
283 old_cost = register_move_cost (GET_MODE (src),
284 REGNO_REG_CLASS (REGNO (src)), dclass);
285 else
286 old_cost = set_src_cost (src, speed);
287
288 for (l = val->locs; l; l = l->next)
289 {
290 rtx this_rtx = l->loc;
291 int this_cost;
292
293 if (CONSTANT_P (this_rtx) && ! references_value_p (this_rtx, 0))
294 {
295 #ifdef LOAD_EXTEND_OP
296 if (extend_op != UNKNOWN)
297 {
298 wide_int result;
299
300 if (!CONST_SCALAR_INT_P (this_rtx))
301 continue;
302
303 switch (extend_op)
304 {
305 case ZERO_EXTEND:
306 result = wide_int (std::make_pair (this_rtx, GET_MODE (src)));
307 if (GET_MODE_PRECISION (GET_MODE (src))
308 > GET_MODE_PRECISION (word_mode))
309 result = wi::zext (result, GET_MODE_PRECISION (word_mode));
310 break;
311 case SIGN_EXTEND:
312 result = wide_int (std::make_pair (this_rtx, GET_MODE (src)));
313 if (GET_MODE_PRECISION (GET_MODE (src))
314 > GET_MODE_PRECISION (word_mode))
315 result = wi::sext (result, GET_MODE_PRECISION (word_mode));
316 break;
317 default:
318 gcc_unreachable ();
319 }
320 this_rtx = immed_wide_int_const (result, GET_MODE (src));
321 }
322 #endif
323 this_cost = set_src_cost (this_rtx, speed);
324 }
325 else if (REG_P (this_rtx))
326 {
327 #ifdef LOAD_EXTEND_OP
328 if (extend_op != UNKNOWN)
329 {
330 this_rtx = gen_rtx_fmt_e (extend_op, word_mode, this_rtx);
331 this_cost = set_src_cost (this_rtx, speed);
332 }
333 else
334 #endif
335 this_cost = register_move_cost (GET_MODE (this_rtx),
336 REGNO_REG_CLASS (REGNO (this_rtx)),
337 dclass);
338 }
339 else
340 continue;
341
342 /* If equal costs, prefer registers over anything else. That
343 tends to lead to smaller instructions on some machines. */
344 if (this_cost < old_cost
345 || (this_cost == old_cost
346 && REG_P (this_rtx)
347 && !REG_P (SET_SRC (set))))
348 {
349 #ifdef LOAD_EXTEND_OP
350 if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (set))) < BITS_PER_WORD
351 && extend_op != UNKNOWN
352 #ifdef CANNOT_CHANGE_MODE_CLASS
353 && !CANNOT_CHANGE_MODE_CLASS (GET_MODE (SET_DEST (set)),
354 word_mode,
355 REGNO_REG_CLASS (REGNO (SET_DEST (set))))
356 #endif
357 )
358 {
359 rtx wide_dest = gen_rtx_REG (word_mode, REGNO (SET_DEST (set)));
360 ORIGINAL_REGNO (wide_dest) = ORIGINAL_REGNO (SET_DEST (set));
361 validate_change (insn, &SET_DEST (set), wide_dest, 1);
362 }
363 #endif
364
365 validate_unshare_change (insn, &SET_SRC (set), this_rtx, 1);
366 old_cost = this_cost, did_change = 1;
367 }
368 }
369
370 return did_change;
371 }
372
373 /* Try to replace operands in INSN with equivalent values that are already
374 in registers. This can be viewed as optional reloading.
375
376 For each non-register operand in the insn, see if any hard regs are
377 known to be equivalent to that operand. Record the alternatives which
378 can accept these hard registers. Among all alternatives, select the
379 ones which are better or equal to the one currently matching, where
380 "better" is in terms of '?' and '!' constraints. Among the remaining
381 alternatives, select the one which replaces most operands with
382 hard registers. */
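/* Illustrative example: given an operand with constraint "r,m" that is
   currently a MEM, if cselib shows the operand's value already lives in
   hard register 3, the "r" alternative can accept (reg 3); provided that
   alternative is no worse than the current one under the '?' and '!'
   weights used below, the MEM is replaced by the register.  */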
383
384 static int
385 reload_cse_simplify_operands (rtx insn, rtx testreg)
386 {
387 int i, j;
388
389 /* For each operand, all registers that are equivalent to it. */
390 HARD_REG_SET equiv_regs[MAX_RECOG_OPERANDS];
391
392 const char *constraints[MAX_RECOG_OPERANDS];
393
394 /* Vector recording how bad an alternative is. */
395 int *alternative_reject;
396 /* Vector recording how many registers can be introduced by choosing
397 this alternative. */
398 int *alternative_nregs;
399 /* Array of vectors recording, for each operand and each alternative,
400 which hard register to substitute, or -1 if the operand should be
401 left as it is. */
402 int *op_alt_regno[MAX_RECOG_OPERANDS];
403 /* Array of alternatives, sorted in order of decreasing desirability. */
404 int *alternative_order;
405
406 extract_insn (insn);
407
408 if (recog_data.n_alternatives == 0 || recog_data.n_operands == 0)
409 return 0;
410
411 /* Figure out which alternative currently matches. */
412 if (! constrain_operands (1))
413 fatal_insn_not_found (insn);
414
415 alternative_reject = XALLOCAVEC (int, recog_data.n_alternatives);
416 alternative_nregs = XALLOCAVEC (int, recog_data.n_alternatives);
417 alternative_order = XALLOCAVEC (int, recog_data.n_alternatives);
418 memset (alternative_reject, 0, recog_data.n_alternatives * sizeof (int));
419 memset (alternative_nregs, 0, recog_data.n_alternatives * sizeof (int));
420
421 /* For each operand, find out which regs are equivalent. */
422 for (i = 0; i < recog_data.n_operands; i++)
423 {
424 cselib_val *v;
425 struct elt_loc_list *l;
426 rtx op;
427
428 CLEAR_HARD_REG_SET (equiv_regs[i]);
429
430 /* cselib blows up on CODE_LABELs. Trying to fix that doesn't seem
431 right, so avoid the problem here. Likewise if we have a constant
432 and the insn pattern doesn't tell us the mode we need. */
433 if (LABEL_P (recog_data.operand[i])
434 || (CONSTANT_P (recog_data.operand[i])
435 && recog_data.operand_mode[i] == VOIDmode))
436 continue;
437
438 op = recog_data.operand[i];
439 #ifdef LOAD_EXTEND_OP
440 if (MEM_P (op)
441 && GET_MODE_BITSIZE (GET_MODE (op)) < BITS_PER_WORD
442 && LOAD_EXTEND_OP (GET_MODE (op)) != UNKNOWN)
443 {
444 rtx set = single_set (insn);
445
446 /* We might have multiple sets, some of which do implicit
447 extension. Punt on this for now. */
448 if (! set)
449 continue;
450 /* If the destination is also a MEM or a STRICT_LOW_PART, no
451 extension applies.
452 Also, if there is an explicit extension, we don't have to
453 worry about an implicit one. */
454 else if (MEM_P (SET_DEST (set))
455 || GET_CODE (SET_DEST (set)) == STRICT_LOW_PART
456 || GET_CODE (SET_SRC (set)) == ZERO_EXTEND
457 || GET_CODE (SET_SRC (set)) == SIGN_EXTEND)
458 ; /* Continue ordinary processing. */
459 #ifdef CANNOT_CHANGE_MODE_CLASS
460 /* If the register cannot change mode to word_mode, it follows that
461 it cannot have been used in word_mode. */
462 else if (REG_P (SET_DEST (set))
463 && CANNOT_CHANGE_MODE_CLASS (GET_MODE (SET_DEST (set)),
464 word_mode,
465 REGNO_REG_CLASS (REGNO (SET_DEST (set)))))
466 ; /* Continue ordinary processing. */
467 #endif
468 /* If this is a straight load, make the extension explicit. */
469 else if (REG_P (SET_DEST (set))
470 && recog_data.n_operands == 2
471 && SET_SRC (set) == op
472 && SET_DEST (set) == recog_data.operand[1-i])
473 {
474 validate_change (insn, recog_data.operand_loc[i],
475 gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (op)),
476 word_mode, op),
477 1);
478 validate_change (insn, recog_data.operand_loc[1-i],
479 gen_rtx_REG (word_mode, REGNO (SET_DEST (set))),
480 1);
481 if (! apply_change_group ())
482 return 0;
483 return reload_cse_simplify_operands (insn, testreg);
484 }
485 else
486 /* ??? There might be arithmetic operations with memory that are
487 safe to optimize, but is it worth the trouble? */
488 continue;
489 }
490 #endif /* LOAD_EXTEND_OP */
491 if (side_effects_p (op))
492 continue;
493 v = cselib_lookup (op, recog_data.operand_mode[i], 0, VOIDmode);
494 if (! v)
495 continue;
496
497 for (l = v->locs; l; l = l->next)
498 if (REG_P (l->loc))
499 SET_HARD_REG_BIT (equiv_regs[i], REGNO (l->loc));
500 }
501
502 for (i = 0; i < recog_data.n_operands; i++)
503 {
504 enum machine_mode mode;
505 int regno;
506 const char *p;
507
508 op_alt_regno[i] = XALLOCAVEC (int, recog_data.n_alternatives);
509 for (j = 0; j < recog_data.n_alternatives; j++)
510 op_alt_regno[i][j] = -1;
511
512 p = constraints[i] = recog_data.constraints[i];
513 mode = recog_data.operand_mode[i];
514
515 /* Add the reject values for each alternative given by the constraints
516 for this operand. */
517 j = 0;
518 while (*p != '\0')
519 {
520 char c = *p++;
521 if (c == ',')
522 j++;
523 else if (c == '?')
524 alternative_reject[j] += 3;
525 else if (c == '!')
526 alternative_reject[j] += 300;
527 }
528
529 /* We won't change operands which are already registers. We
530 also don't want to modify output operands. */
531 regno = true_regnum (recog_data.operand[i]);
532 if (regno >= 0
533 || constraints[i][0] == '='
534 || constraints[i][0] == '+')
535 continue;
536
537 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
538 {
539 enum reg_class rclass = NO_REGS;
540
541 if (! TEST_HARD_REG_BIT (equiv_regs[i], regno))
542 continue;
543
544 SET_REGNO_RAW (testreg, regno);
545 PUT_MODE (testreg, mode);
546
547 /* We found a register equal to this operand. Now look for all
548 alternatives that can accept this register and have not been
549 assigned a register they can use yet. */
550 j = 0;
551 p = constraints[i];
552 for (;;)
553 {
554 char c = *p;
555
556 switch (c)
557 {
558 case '=': case '+': case '?':
559 case '#': case '&': case '!':
560 case '*': case '%':
561 case '0': case '1': case '2': case '3': case '4':
562 case '5': case '6': case '7': case '8': case '9':
563 case '<': case '>': case 'V': case 'o':
564 case 'E': case 'F': case 'G': case 'H':
565 case 's': case 'i': case 'n':
566 case 'I': case 'J': case 'K': case 'L':
567 case 'M': case 'N': case 'O': case 'P':
568 case 'p': case 'X': case TARGET_MEM_CONSTRAINT:
569 /* These don't say anything we care about. */
570 break;
571
572 case 'g': case 'r':
573 rclass = reg_class_subunion[(int) rclass][(int) GENERAL_REGS];
574 break;
575
576 default:
577 rclass
578 = (reg_class_subunion
579 [(int) rclass]
580 [(int) REG_CLASS_FROM_CONSTRAINT ((unsigned char) c, p)]);
581 break;
582
583 case ',': case '\0':
584 /* See if REGNO fits this alternative, and set it up as the
585 replacement register if we don't have one for this
586 alternative yet and the operand being replaced is not
587 a cheap CONST_INT. */
588 if (op_alt_regno[i][j] == -1
589 && recog_data.alternative_enabled_p[j]
590 && reg_fits_class_p (testreg, rclass, 0, mode)
591 && (!CONST_INT_P (recog_data.operand[i])
592 || (set_src_cost (recog_data.operand[i],
593 optimize_bb_for_speed_p
594 (BLOCK_FOR_INSN (insn)))
595 > set_src_cost (testreg,
596 optimize_bb_for_speed_p
597 (BLOCK_FOR_INSN (insn))))))
598 {
599 alternative_nregs[j]++;
600 op_alt_regno[i][j] = regno;
601 }
602 j++;
603 rclass = NO_REGS;
604 break;
605 }
606 p += CONSTRAINT_LEN (c, p);
607
608 if (c == '\0')
609 break;
610 }
611 }
612 }
613
614 /* Record all alternatives which are better or equal to the currently
615 matching one in the alternative_order array. */
616 for (i = j = 0; i < recog_data.n_alternatives; i++)
617 if (alternative_reject[i] <= alternative_reject[which_alternative])
618 alternative_order[j++] = i;
619 recog_data.n_alternatives = j;
620
621 /* Sort it. Given a small number of alternatives, a dumb algorithm
622 won't hurt too much. */
623 for (i = 0; i < recog_data.n_alternatives - 1; i++)
624 {
625 int best = i;
626 int best_reject = alternative_reject[alternative_order[i]];
627 int best_nregs = alternative_nregs[alternative_order[i]];
628 int tmp;
629
630 for (j = i + 1; j < recog_data.n_alternatives; j++)
631 {
632 int this_reject = alternative_reject[alternative_order[j]];
633 int this_nregs = alternative_nregs[alternative_order[j]];
634
635 if (this_reject < best_reject
636 || (this_reject == best_reject && this_nregs > best_nregs))
637 {
638 best = j;
639 best_reject = this_reject;
640 best_nregs = this_nregs;
641 }
642 }
643
644 tmp = alternative_order[best];
645 alternative_order[best] = alternative_order[i];
646 alternative_order[i] = tmp;
647 }
648
649 /* Substitute the operands as determined by op_alt_regno for the best
650 alternative. */
651 j = alternative_order[0];
652
653 for (i = 0; i < recog_data.n_operands; i++)
654 {
655 enum machine_mode mode = recog_data.operand_mode[i];
656 if (op_alt_regno[i][j] == -1)
657 continue;
658
659 validate_change (insn, recog_data.operand_loc[i],
660 gen_rtx_REG (mode, op_alt_regno[i][j]), 1);
661 }
662
663 for (i = recog_data.n_dups - 1; i >= 0; i--)
664 {
665 int op = recog_data.dup_num[i];
666 enum machine_mode mode = recog_data.operand_mode[op];
667
668 if (op_alt_regno[op][j] == -1)
669 continue;
670
671 validate_change (insn, recog_data.dup_loc[i],
672 gen_rtx_REG (mode, op_alt_regno[op][j]), 1);
673 }
674
675 return apply_change_group ();
676 }
677 \f
678 /* If reload couldn't use reg+reg+offset addressing, try to use reg+reg
679 addressing now.
680 This code might also be useful when reload gave up on reg+reg addressing
681 because of clashes between the return register and INDEX_REG_CLASS. */
682
683 /* The maximum number of uses of a register we can keep track of to
684 replace them with reg+reg addressing. */
685 #define RELOAD_COMBINE_MAX_USES 16
686
687 /* Describes a recorded use of a register. */
688 struct reg_use
689 {
690 /* The insn where a register has been used. */
691 rtx insn;
692 /* Points to the memory reference enclosing the use, if any, NULL_RTX
693 otherwise. */
694 rtx containing_mem;
695 /* Location of the register within INSN. */
696 rtx *usep;
697 /* The reverse uid of the insn. */
698 int ruid;
699 };
700
701 /* If the register is used in some unknown fashion, USE_INDEX is negative.
702 If it is dead, USE_INDEX is RELOAD_COMBINE_MAX_USES, and STORE_RUID
703 indicates where it is first set or clobbered.
704 Otherwise, USE_INDEX is the index of the last encountered use of the
705 register (which is first among these we have seen since we scan backwards).
706 USE_RUID indicates the first encountered, i.e. last, of these uses.
707 If ALL_OFFSETS_MATCH is true, all encountered uses were inside a PLUS
708 with a constant offset; OFFSET contains this constant in that case.
709 STORE_RUID is always meaningful if we only want to use a value in a
710 register in a different place: it denotes the next insn in the insn
711 stream (i.e. the last encountered) that sets or clobbers the register.
712 REAL_STORE_RUID is similar, but clobbers are ignored when updating it. */
713 static struct
714 {
715 struct reg_use reg_use[RELOAD_COMBINE_MAX_USES];
716 rtx offset;
717 int use_index;
718 int store_ruid;
719 int real_store_ruid;
720 int use_ruid;
721 bool all_offsets_match;
722 } reg_state[FIRST_PSEUDO_REGISTER];
723
724 /* Reverse linear uid. This is increased in reload_combine while scanning
725 the instructions from last to first. It is used to set last_label_ruid
726 and the store_ruid / use_ruid fields in reg_state. */
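/* For example, in an insn stream I1, I2, I3 scanned from I3 backwards,
   I3 is assigned ruid 1, I2 ruid 2 and I1 ruid 3; a higher ruid thus
   means an earlier position in the insn stream.  */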
727 static int reload_combine_ruid;
728
729 /* The RUID of the last label we encountered in reload_combine. */
730 static int last_label_ruid;
731
732 /* The RUID of the last jump we encountered in reload_combine. */
733 static int last_jump_ruid;
734
735 /* The register numbers of the first and last index register. A value of
736 -1 in LAST_INDEX_REG indicates that we've previously computed these
737 values and found no suitable index registers. */
738 static int first_index_reg = -1;
739 static int last_index_reg;
740
741 #define LABEL_LIVE(LABEL) \
742 (label_live[CODE_LABEL_NUMBER (LABEL) - min_labelno])
743
744 /* Subroutine of reload_combine_split_ruids, called to fix up a single
745 ruid pointed to by *PRUID if it is higher than SPLIT_RUID. */
746
747 static inline void
748 reload_combine_split_one_ruid (int *pruid, int split_ruid)
749 {
750 if (*pruid > split_ruid)
751 (*pruid)++;
752 }
753
754 /* Called when we insert a new insn in a position we've already passed in
755 the scan. Examine all our state, increasing all ruids that are higher
756 than SPLIT_RUID by one in order to make room for a new insn. */
757
758 static void
759 reload_combine_split_ruids (int split_ruid)
760 {
761 unsigned i;
762
763 reload_combine_split_one_ruid (&reload_combine_ruid, split_ruid);
764 reload_combine_split_one_ruid (&last_label_ruid, split_ruid);
765 reload_combine_split_one_ruid (&last_jump_ruid, split_ruid);
766
767 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
768 {
769 int j, idx = reg_state[i].use_index;
770 reload_combine_split_one_ruid (&reg_state[i].use_ruid, split_ruid);
771 reload_combine_split_one_ruid (&reg_state[i].store_ruid, split_ruid);
772 reload_combine_split_one_ruid (&reg_state[i].real_store_ruid,
773 split_ruid);
774 if (idx < 0)
775 continue;
776 for (j = idx; j < RELOAD_COMBINE_MAX_USES; j++)
777 {
778 reload_combine_split_one_ruid (&reg_state[i].reg_use[j].ruid,
779 split_ruid);
780 }
781 }
782 }
783
784 /* Called when we are about to rescan a previously encountered insn with
785 reload_combine_note_use after modifying some part of it. This clears all
786 information about uses in that particular insn. */
787
788 static void
789 reload_combine_purge_insn_uses (rtx insn)
790 {
791 unsigned i;
792
793 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
794 {
795 int j, k, idx = reg_state[i].use_index;
796 if (idx < 0)
797 continue;
798 j = k = RELOAD_COMBINE_MAX_USES;
799 while (j-- > idx)
800 {
801 if (reg_state[i].reg_use[j].insn != insn)
802 {
803 k--;
804 if (k != j)
805 reg_state[i].reg_use[k] = reg_state[i].reg_use[j];
806 }
807 }
808 reg_state[i].use_index = k;
809 }
810 }
811
812 /* Called when we need to forget about all uses of REGNO after an insn
813 which is identified by RUID. */
814
815 static void
816 reload_combine_purge_reg_uses_after_ruid (unsigned regno, int ruid)
817 {
818 int j, k, idx = reg_state[regno].use_index;
819 if (idx < 0)
820 return;
821 j = k = RELOAD_COMBINE_MAX_USES;
822 while (j-- > idx)
823 {
824 if (reg_state[regno].reg_use[j].ruid >= ruid)
825 {
826 k--;
827 if (k != j)
828 reg_state[regno].reg_use[k] = reg_state[regno].reg_use[j];
829 }
830 }
831 reg_state[regno].use_index = k;
832 }
833
834 /* Find the use of REGNO with the ruid that is highest among those
835 lower than RUID_LIMIT, and return it if it is the only use of this
836 reg in the insn. Return NULL otherwise. */
837
838 static struct reg_use *
839 reload_combine_closest_single_use (unsigned regno, int ruid_limit)
840 {
841 int i, best_ruid = 0;
842 int use_idx = reg_state[regno].use_index;
843 struct reg_use *retval;
844
845 if (use_idx < 0)
846 return NULL;
847 retval = NULL;
848 for (i = use_idx; i < RELOAD_COMBINE_MAX_USES; i++)
849 {
850 struct reg_use *use = reg_state[regno].reg_use + i;
851 int this_ruid = use->ruid;
852 if (this_ruid >= ruid_limit)
853 continue;
854 if (this_ruid > best_ruid)
855 {
856 best_ruid = this_ruid;
857 retval = use;
858 }
859 else if (this_ruid == best_ruid)
860 retval = NULL;
861 }
862 if (last_label_ruid >= best_ruid)
863 return NULL;
864 return retval;
865 }
866
867 /* After we've moved an add insn, fix up any debug insns that occur
868 between the old location of the add and the new location. REG is
869 the destination register of the add insn; REPLACEMENT is the
870 SET_SRC of the add. FROM and TO specify the range in which we
871 should make this change on debug insns. */
872
873 static void
874 fixup_debug_insns (rtx reg, rtx replacement, rtx from, rtx to)
875 {
876 rtx insn;
877 for (insn = from; insn != to; insn = NEXT_INSN (insn))
878 {
879 rtx t;
880
881 if (!DEBUG_INSN_P (insn))
882 continue;
883
884 t = INSN_VAR_LOCATION_LOC (insn);
885 t = simplify_replace_rtx (t, reg, replacement);
886 validate_change (insn, &INSN_VAR_LOCATION_LOC (insn), t, 0);
887 }
888 }
889
890 /* Subroutine of reload_combine_recognize_const_pattern. Try to replace REG
891 with SRC in the insn described by USE, taking costs into account. Return
892 true if we made the replacement. */
893
894 static bool
895 try_replace_in_use (struct reg_use *use, rtx reg, rtx src)
896 {
897 rtx use_insn = use->insn;
898 rtx mem = use->containing_mem;
899 bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (use_insn));
900
901 if (mem != NULL_RTX)
902 {
903 addr_space_t as = MEM_ADDR_SPACE (mem);
904 rtx oldaddr = XEXP (mem, 0);
905 rtx newaddr = NULL_RTX;
906 int old_cost = address_cost (oldaddr, GET_MODE (mem), as, speed);
907 int new_cost;
908
909 newaddr = simplify_replace_rtx (oldaddr, reg, src);
910 if (memory_address_addr_space_p (GET_MODE (mem), newaddr, as))
911 {
912 XEXP (mem, 0) = newaddr;
913 new_cost = address_cost (newaddr, GET_MODE (mem), as, speed);
914 XEXP (mem, 0) = oldaddr;
915 if (new_cost <= old_cost
916 && validate_change (use_insn,
917 &XEXP (mem, 0), newaddr, 0))
918 return true;
919 }
920 }
921 else
922 {
923 rtx new_set = single_set (use_insn);
924 if (new_set
925 && REG_P (SET_DEST (new_set))
926 && GET_CODE (SET_SRC (new_set)) == PLUS
927 && REG_P (XEXP (SET_SRC (new_set), 0))
928 && CONSTANT_P (XEXP (SET_SRC (new_set), 1)))
929 {
930 rtx new_src;
931 int old_cost = set_src_cost (SET_SRC (new_set), speed);
932
933 gcc_assert (rtx_equal_p (XEXP (SET_SRC (new_set), 0), reg));
934 new_src = simplify_replace_rtx (SET_SRC (new_set), reg, src);
935
936 if (set_src_cost (new_src, speed) <= old_cost
937 && validate_change (use_insn, &SET_SRC (new_set),
938 new_src, 0))
939 return true;
940 }
941 }
942 return false;
943 }
944
945 /* Called by reload_combine when scanning INSN. This function tries to detect
946 patterns where a constant is added to a register, and the result is used
947 in an address.
948 Return true if no further processing is needed on INSN; false if it wasn't
949 recognized and should be handled normally. */
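/* A sketch of the kind of sequence this recognizes (illustrative):

       (set (reg:SI 1) (plus:SI (reg:SI 2) (const_int 100)))
       ...
       ... (mem:SI (reg:SI 1)) ...

   When profitable, the use becomes
   (mem:SI (plus:SI (reg:SI 2) (const_int 100))) and the add insn is
   deleted or moved past its uses.  */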
950
951 static bool
952 reload_combine_recognize_const_pattern (rtx insn)
953 {
954 int from_ruid = reload_combine_ruid;
955 rtx set, pat, reg, src, addreg;
956 unsigned int regno;
957 struct reg_use *use;
958 bool must_move_add;
959 rtx add_moved_after_insn = NULL_RTX;
960 int add_moved_after_ruid = 0;
961 int clobbered_regno = -1;
962
963 set = single_set (insn);
964 if (set == NULL_RTX)
965 return false;
966
967 reg = SET_DEST (set);
968 src = SET_SRC (set);
969 if (!REG_P (reg)
970 || hard_regno_nregs[REGNO (reg)][GET_MODE (reg)] != 1
971 || GET_MODE (reg) != Pmode
972 || reg == stack_pointer_rtx)
973 return false;
974
975 regno = REGNO (reg);
976
977 /* We look for a REG1 = REG2 + CONSTANT insn, followed by uses of
978 REG1 either inside an address or inside another add insn. If
979 possible and profitable, merge the addition into subsequent
980 uses. */
981 if (GET_CODE (src) != PLUS
982 || !REG_P (XEXP (src, 0))
983 || !CONSTANT_P (XEXP (src, 1)))
984 return false;
985
986 addreg = XEXP (src, 0);
987 must_move_add = rtx_equal_p (reg, addreg);
988
989 pat = PATTERN (insn);
990 if (must_move_add && set != pat)
991 {
992 /* We have to be careful when moving the add; apart from the
993 single_set there may also be clobbers. Recognize one special
994 case, that of one clobber alongside the set (likely a clobber
995 of the CC register). */
996 gcc_assert (GET_CODE (PATTERN (insn)) == PARALLEL);
997 if (XVECLEN (pat, 0) != 2 || XVECEXP (pat, 0, 0) != set
998 || GET_CODE (XVECEXP (pat, 0, 1)) != CLOBBER
999 || !REG_P (XEXP (XVECEXP (pat, 0, 1), 0)))
1000 return false;
1001 clobbered_regno = REGNO (XEXP (XVECEXP (pat, 0, 1), 0));
1002 }
1003
1004 do
1005 {
1006 use = reload_combine_closest_single_use (regno, from_ruid);
1007
1008 if (use)
1009 /* Start the search for the next use from here. */
1010 from_ruid = use->ruid;
1011
1012 if (use && GET_MODE (*use->usep) == Pmode)
1013 {
1014 bool delete_add = false;
1015 rtx use_insn = use->insn;
1016 int use_ruid = use->ruid;
1017
1018 /* Avoid moving the add insn past a jump. */
1019 if (must_move_add && use_ruid <= last_jump_ruid)
1020 break;
1021
1022 /* If the add clobbers another hard reg in parallel, don't move
1023 it past a real set of this hard reg. */
1024 if (must_move_add && clobbered_regno >= 0
1025 && reg_state[clobbered_regno].real_store_ruid >= use_ruid)
1026 break;
1027
1028 #ifdef HAVE_cc0
1029 /* Do not separate cc0 setter and cc0 user on HAVE_cc0 targets. */
1030 if (must_move_add && sets_cc0_p (PATTERN (use_insn)))
1031 break;
1032 #endif
1033
1034 gcc_assert (reg_state[regno].store_ruid <= use_ruid);
1035 /* Avoid moving a use of ADDREG past a point where it is stored. */
1036 if (reg_state[REGNO (addreg)].store_ruid > use_ruid)
1037 break;
1038
1039 /* We also must not move the addition past an insn that sets
1040 the same register, unless we can combine two add insns. */
1041 if (must_move_add && reg_state[regno].store_ruid == use_ruid)
1042 {
1043 if (use->containing_mem == NULL_RTX)
1044 delete_add = true;
1045 else
1046 break;
1047 }
1048
1049 if (try_replace_in_use (use, reg, src))
1050 {
1051 reload_combine_purge_insn_uses (use_insn);
1052 reload_combine_note_use (&PATTERN (use_insn), use_insn,
1053 use_ruid, NULL_RTX);
1054
1055 if (delete_add)
1056 {
1057 fixup_debug_insns (reg, src, insn, use_insn);
1058 delete_insn (insn);
1059 return true;
1060 }
1061 if (must_move_add)
1062 {
1063 add_moved_after_insn = use_insn;
1064 add_moved_after_ruid = use_ruid;
1065 }
1066 continue;
1067 }
1068 }
1069 /* If we get here, we couldn't handle this use. */
1070 if (must_move_add)
1071 break;
1072 }
1073 while (use);
1074
1075 if (!must_move_add || add_moved_after_insn == NULL_RTX)
1076 /* Process the add normally. */
1077 return false;
1078
1079 fixup_debug_insns (reg, src, insn, add_moved_after_insn);
1080
1081 reorder_insns (insn, insn, add_moved_after_insn);
1082 reload_combine_purge_reg_uses_after_ruid (regno, add_moved_after_ruid);
1083 reload_combine_split_ruids (add_moved_after_ruid - 1);
1084 reload_combine_note_use (&PATTERN (insn), insn,
1085 add_moved_after_ruid, NULL_RTX);
1086 reg_state[regno].store_ruid = add_moved_after_ruid;
1087
1088 return true;
1089 }
1090
1091 /* Called by reload_combine when scanning INSN. Try to detect a pattern we
1092 can handle and improve. Return true if no further processing is needed on
1093 INSN; false if it wasn't recognized and should be handled normally. */
1094
1095 static bool
1096 reload_combine_recognize_pattern (rtx insn)
1097 {
1098 rtx set, reg, src;
1099 unsigned int regno;
1100
1101 set = single_set (insn);
1102 if (set == NULL_RTX)
1103 return false;
1104
1105 reg = SET_DEST (set);
1106 src = SET_SRC (set);
1107 if (!REG_P (reg)
1108 || hard_regno_nregs[REGNO (reg)][GET_MODE (reg)] != 1)
1109 return false;
1110
1111 regno = REGNO (reg);
1112
1113 /* Look for (set (REGX) (CONST_INT))
1114 (set (REGX) (PLUS (REGX) (REGY)))
1115 ...
1116 ... (MEM (REGX)) ...
1117 and convert it to
1118 (set (REGZ) (CONST_INT))
1119 ...
1120 ... (MEM (PLUS (REGZ) (REGY)))... .
1121
1122 First, check that we have (set (REGX) (PLUS (REGX) (REGY)))
1123 and that we know all uses of REGX before it dies.
1124 Also, explicitly check that REGX != REGY; our life information
1125 does not yet show whether REGY changes in this insn. */
1126
1127 if (GET_CODE (src) == PLUS
1128 && reg_state[regno].all_offsets_match
1129 && last_index_reg != -1
1130 && REG_P (XEXP (src, 1))
1131 && rtx_equal_p (XEXP (src, 0), reg)
1132 && !rtx_equal_p (XEXP (src, 1), reg)
1133 && reg_state[regno].use_index >= 0
1134 && reg_state[regno].use_index < RELOAD_COMBINE_MAX_USES
1135 && last_label_ruid < reg_state[regno].use_ruid)
1136 {
1137 rtx base = XEXP (src, 1);
1138 rtx prev = prev_nonnote_nondebug_insn (insn);
1139 rtx prev_set = prev ? single_set (prev) : NULL_RTX;
1140 rtx index_reg = NULL_RTX;
1141 rtx reg_sum = NULL_RTX;
1142 int i;
1143
1144 /* Now we need to set INDEX_REG to an index register (denoted as
1145 REGZ in the illustration above) and REG_SUM to the register+register
1146 expression that we want to substitute for uses of REG
1147 (typically in MEMs). First check whether REG and BASE are
1148 index registers; we can use them even if they are not dead. */
1149 if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], regno)
1150 || TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS],
1151 REGNO (base)))
1152 {
1153 index_reg = reg;
1154 reg_sum = src;
1155 }
1156 else
1157 {
1158 /* Otherwise, look for a free index register. Since we have
1159 checked above that neither REG nor BASE are index registers,
1160 if we find anything at all, it will be different from these
1161 two registers. */
1162 for (i = first_index_reg; i <= last_index_reg; i++)
1163 {
1164 if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], i)
1165 && reg_state[i].use_index == RELOAD_COMBINE_MAX_USES
1166 && reg_state[i].store_ruid <= reg_state[regno].use_ruid
1167 && (call_used_regs[i] || df_regs_ever_live_p (i))
1168 && (!frame_pointer_needed || i != HARD_FRAME_POINTER_REGNUM)
1169 && !fixed_regs[i] && !global_regs[i]
1170 && hard_regno_nregs[i][GET_MODE (reg)] == 1
1171 && targetm.hard_regno_scratch_ok (i))
1172 {
1173 index_reg = gen_rtx_REG (GET_MODE (reg), i);
1174 reg_sum = gen_rtx_PLUS (GET_MODE (reg), index_reg, base);
1175 break;
1176 }
1177 }
1178 }
1179
1180 /* Check that PREV_SET is indeed (set (REGX) (CONST_INT)) and that
1181 (REGY), i.e. BASE, is not clobbered before the last use we'll
1182 create. */
1183 if (reg_sum
1184 && prev_set
1185 && CONST_INT_P (SET_SRC (prev_set))
1186 && rtx_equal_p (SET_DEST (prev_set), reg)
1187 && (reg_state[REGNO (base)].store_ruid
1188 <= reg_state[regno].use_ruid))
1189 {
1190 /* Change destination register and, if necessary, the constant
1191 value in PREV, the constant loading instruction. */
1192 validate_change (prev, &SET_DEST (prev_set), index_reg, 1);
1193 if (reg_state[regno].offset != const0_rtx)
1194 validate_change (prev,
1195 &SET_SRC (prev_set),
1196 GEN_INT (INTVAL (SET_SRC (prev_set))
1197 + INTVAL (reg_state[regno].offset)),
1198 1);
1199
1200 /* Now for every use of REG that we have recorded, replace REG
1201 with REG_SUM. */
1202 for (i = reg_state[regno].use_index;
1203 i < RELOAD_COMBINE_MAX_USES; i++)
1204 validate_unshare_change (reg_state[regno].reg_use[i].insn,
1205 reg_state[regno].reg_use[i].usep,
1206 /* Each change must have its own
1207 replacement. */
1208 reg_sum, 1);
1209
1210 if (apply_change_group ())
1211 {
1212 struct reg_use *lowest_ruid = NULL;
1213
1214 /* For every new use of REG_SUM, we have to record the use
1215 of BASE therein, i.e. operand 1. */
1216 for (i = reg_state[regno].use_index;
1217 i < RELOAD_COMBINE_MAX_USES; i++)
1218 {
1219 struct reg_use *use = reg_state[regno].reg_use + i;
1220 reload_combine_note_use (&XEXP (*use->usep, 1), use->insn,
1221 use->ruid, use->containing_mem);
1222 if (lowest_ruid == NULL || use->ruid < lowest_ruid->ruid)
1223 lowest_ruid = use;
1224 }
1225
1226 fixup_debug_insns (reg, reg_sum, insn, lowest_ruid->insn);
1227
1228 /* Delete the reg-reg addition. */
1229 delete_insn (insn);
1230
1231 if (reg_state[regno].offset != const0_rtx)
1232 /* Previous REG_EQUIV / REG_EQUAL notes for PREV
1233 are now invalid. */
1234 remove_reg_equal_equiv_notes (prev);
1235
1236 reg_state[regno].use_index = RELOAD_COMBINE_MAX_USES;
1237 return true;
1238 }
1239 }
1240 }
1241 return false;
1242 }
1243
1244 static void
1245 reload_combine (void)
1246 {
1247 rtx insn, prev;
1248 basic_block bb;
1249 unsigned int r;
1250 int min_labelno, n_labels;
1251 HARD_REG_SET ever_live_at_start, *label_live;
1252
1253 /* To avoid wasting too much time later searching for an index register,
1254 determine the minimum and maximum index register numbers. */
1255 if (INDEX_REG_CLASS == NO_REGS)
1256 last_index_reg = -1;
1257 else if (first_index_reg == -1 && last_index_reg == 0)
1258 {
1259 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
1260 if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], r))
1261 {
1262 if (first_index_reg == -1)
1263 first_index_reg = r;
1264
1265 last_index_reg = r;
1266 }
1267
1268 /* If no index register is available, we can quit now. Set LAST_INDEX_REG
1269 to -1 so we'll know to quit early the next time we get here. */
1270 if (first_index_reg == -1)
1271 {
1272 last_index_reg = -1;
1273 return;
1274 }
1275 }
1276
1277 /* Set up LABEL_LIVE and EVER_LIVE_AT_START. The register lifetime
1278 information is a bit fuzzy immediately after reload, but it's
1279 still good enough to determine which registers are live at a jump
1280 destination. */
1281 min_labelno = get_first_label_num ();
1282 n_labels = max_label_num () - min_labelno;
1283 label_live = XNEWVEC (HARD_REG_SET, n_labels);
1284 CLEAR_HARD_REG_SET (ever_live_at_start);
1285
1286 FOR_EACH_BB_REVERSE (bb)
1287 {
1288 insn = BB_HEAD (bb);
1289 if (LABEL_P (insn))
1290 {
1291 HARD_REG_SET live;
1292 bitmap live_in = df_get_live_in (bb);
1293
1294 REG_SET_TO_HARD_REG_SET (live, live_in);
1295 compute_use_by_pseudos (&live, live_in);
1296 COPY_HARD_REG_SET (LABEL_LIVE (insn), live);
1297 IOR_HARD_REG_SET (ever_live_at_start, live);
1298 }
1299 }
1300
1301 /* Initialize last_label_ruid, reload_combine_ruid and reg_state. */
1302 last_label_ruid = last_jump_ruid = reload_combine_ruid = 0;
1303 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
1304 {
1305 reg_state[r].store_ruid = 0;
1306 reg_state[r].real_store_ruid = 0;
1307 if (fixed_regs[r])
1308 reg_state[r].use_index = -1;
1309 else
1310 reg_state[r].use_index = RELOAD_COMBINE_MAX_USES;
1311 }
1312
1313 for (insn = get_last_insn (); insn; insn = prev)
1314 {
1315 bool control_flow_insn;
1316 rtx note;
1317
1318 prev = PREV_INSN (insn);
1319
1320 /* We cannot do our optimization across labels. Invalidating all the use
1321 information we have would be costly, so we just note where the label
1322 is and then later disable any optimization that would cross it. */
1323 if (LABEL_P (insn))
1324 last_label_ruid = reload_combine_ruid;
1325 else if (BARRIER_P (insn))
1326 {
1327 /* Crossing a barrier resets all the use information. */
1328 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
1329 if (! fixed_regs[r])
1330 reg_state[r].use_index = RELOAD_COMBINE_MAX_USES;
1331 }
1332 else if (INSN_P (insn) && volatile_insn_p (PATTERN (insn)))
1333 /* Optimizations must not cross insns marked as volatile, so all
1334 the usage information is invalidated
1335 here. */
1336 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
1337 if (! fixed_regs[r]
1338 && reg_state[r].use_index != RELOAD_COMBINE_MAX_USES)
1339 reg_state[r].use_index = -1;
1340
1341 if (! NONDEBUG_INSN_P (insn))
1342 continue;
1343
1344 reload_combine_ruid++;
1345
1346 control_flow_insn = control_flow_insn_p (insn);
1347 if (control_flow_insn)
1348 last_jump_ruid = reload_combine_ruid;
1349
1350 if (reload_combine_recognize_const_pattern (insn)
1351 || reload_combine_recognize_pattern (insn))
1352 continue;
1353
1354 note_stores (PATTERN (insn), reload_combine_note_store, NULL);
1355
1356 if (CALL_P (insn))
1357 {
1358 rtx link;
1359
1360 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
1361 if (call_used_regs[r])
1362 {
1363 reg_state[r].use_index = RELOAD_COMBINE_MAX_USES;
1364 reg_state[r].store_ruid = reload_combine_ruid;
1365 }
1366
1367 for (link = CALL_INSN_FUNCTION_USAGE (insn); link;
1368 link = XEXP (link, 1))
1369 {
1370 rtx setuse = XEXP (link, 0);
1371 rtx usage_rtx = XEXP (setuse, 0);
1372 if ((GET_CODE (setuse) == USE || GET_CODE (setuse) == CLOBBER)
1373 && REG_P (usage_rtx))
1374 {
1375 unsigned int i;
1376 unsigned int start_reg = REGNO (usage_rtx);
1377 unsigned int num_regs
1378 = hard_regno_nregs[start_reg][GET_MODE (usage_rtx)];
1379 unsigned int end_reg = start_reg + num_regs - 1;
1380 for (i = start_reg; i <= end_reg; i++)
1381 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1382 {
1383 reg_state[i].use_index = RELOAD_COMBINE_MAX_USES;
1384 reg_state[i].store_ruid = reload_combine_ruid;
1385 }
1386 else
1387 reg_state[i].use_index = -1;
1388 }
1389 }
1390 }
1391
1392 if (control_flow_insn && !ANY_RETURN_P (PATTERN (insn)))
1393 {
1394 /* Non-spill registers might be used at the jump destination in
1395 some unknown fashion, so we have to mark the unknown use. */
1396 HARD_REG_SET *live;
1397
1398 if ((condjump_p (insn) || condjump_in_parallel_p (insn))
1399 && JUMP_LABEL (insn))
1400 {
1401 if (ANY_RETURN_P (JUMP_LABEL (insn)))
1402 live = NULL;
1403 else
1404 live = &LABEL_LIVE (JUMP_LABEL (insn));
1405 }
1406 else
1407 live = &ever_live_at_start;
1408
1409 if (live)
1410 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
1411 if (TEST_HARD_REG_BIT (*live, r))
1412 reg_state[r].use_index = -1;
1413 }
1414
1415 reload_combine_note_use (&PATTERN (insn), insn, reload_combine_ruid,
1416 NULL_RTX);
1417
1418 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1419 {
1420 if (REG_NOTE_KIND (note) == REG_INC && REG_P (XEXP (note, 0)))
1421 {
1422 int regno = REGNO (XEXP (note, 0));
1423 reg_state[regno].store_ruid = reload_combine_ruid;
1424 reg_state[regno].real_store_ruid = reload_combine_ruid;
1425 reg_state[regno].use_index = -1;
1426 }
1427 }
1428 }
1429
1430 free (label_live);
1431 }
1432
1433 /* Check if DST is a register or a subreg of a register; if it is,
1434 update store_ruid, real_store_ruid and use_index in the reg_state
1435 structure accordingly. Called via note_stores from reload_combine. */
1436
1437 static void
1438 reload_combine_note_store (rtx dst, const_rtx set, void *data ATTRIBUTE_UNUSED)
1439 {
1440 int regno = 0;
1441 int i;
1442 enum machine_mode mode = GET_MODE (dst);
1443
1444 if (GET_CODE (dst) == SUBREG)
1445 {
1446 regno = subreg_regno_offset (REGNO (SUBREG_REG (dst)),
1447 GET_MODE (SUBREG_REG (dst)),
1448 SUBREG_BYTE (dst),
1449 GET_MODE (dst));
1450 dst = SUBREG_REG (dst);
1451 }
1452
1453 /* Some targets do argument pushes without adding REG_INC notes. */
1454
1455 if (MEM_P (dst))
1456 {
1457 dst = XEXP (dst, 0);
1458 if (GET_CODE (dst) == PRE_INC || GET_CODE (dst) == POST_INC
1459 || GET_CODE (dst) == PRE_DEC || GET_CODE (dst) == POST_DEC
1460 || GET_CODE (dst) == PRE_MODIFY || GET_CODE (dst) == POST_MODIFY)
1461 {
1462 regno = REGNO (XEXP (dst, 0));
1463 mode = GET_MODE (XEXP (dst, 0));
1464 for (i = hard_regno_nregs[regno][mode] - 1 + regno; i >= regno; i--)
1465 {
1466 /* We could probably do better, but for now mark the register
1467 as used in an unknown fashion and set/clobbered at this
1468 insn. */
1469 reg_state[i].use_index = -1;
1470 reg_state[i].store_ruid = reload_combine_ruid;
1471 reg_state[i].real_store_ruid = reload_combine_ruid;
1472 }
1473 }
1474 else
1475 return;
1476 }
1477
1478 if (!REG_P (dst))
1479 return;
1480 regno += REGNO (dst);
1481
1482 /* note_stores might have stripped a STRICT_LOW_PART, so we have to be
1483 careful with registers / register parts that are not full words.
1484 Similarly for ZERO_EXTRACT. */
1485 if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT
1486 || GET_CODE (SET_DEST (set)) == STRICT_LOW_PART)
1487 {
1488 for (i = hard_regno_nregs[regno][mode] - 1 + regno; i >= regno; i--)
1489 {
1490 reg_state[i].use_index = -1;
1491 reg_state[i].store_ruid = reload_combine_ruid;
1492 reg_state[i].real_store_ruid = reload_combine_ruid;
1493 }
1494 }
1495 else
1496 {
1497 for (i = hard_regno_nregs[regno][mode] - 1 + regno; i >= regno; i--)
1498 {
1499 reg_state[i].store_ruid = reload_combine_ruid;
1500 if (GET_CODE (set) == SET)
1501 reg_state[i].real_store_ruid = reload_combine_ruid;
1502 reg_state[i].use_index = RELOAD_COMBINE_MAX_USES;
1503 }
1504 }
1505 }
1506
1507 /* XP points to a piece of rtl that has to be checked for any uses of
1508 registers.
1509 *XP is the pattern of INSN, or a part of it.
1510 Called from reload_combine, and recursively by itself. */
1511 static void
1512 reload_combine_note_use (rtx *xp, rtx insn, int ruid, rtx containing_mem)
1513 {
1514 rtx x = *xp;
1515 enum rtx_code code = x->code;
1516 const char *fmt;
1517 int i, j;
1518 rtx offset = const0_rtx; /* For the REG case below. */
1519
1520 switch (code)
1521 {
1522 case SET:
1523 if (REG_P (SET_DEST (x)))
1524 {
1525 reload_combine_note_use (&SET_SRC (x), insn, ruid, NULL_RTX);
1526 return;
1527 }
1528 break;
1529
1530 case USE:
1531 /* If this is the USE of a return value, we can't change it. */
1532 if (REG_P (XEXP (x, 0)) && REG_FUNCTION_VALUE_P (XEXP (x, 0)))
1533 {
1534 /* Mark the return register as used in an unknown fashion. */
1535 rtx reg = XEXP (x, 0);
1536 int regno = REGNO (reg);
1537 int nregs = hard_regno_nregs[regno][GET_MODE (reg)];
1538
1539 while (--nregs >= 0)
1540 reg_state[regno + nregs].use_index = -1;
1541 return;
1542 }
1543 break;
1544
1545 case CLOBBER:
1546 if (REG_P (SET_DEST (x)))
1547 {
1548 /* No spurious CLOBBERs of pseudo registers may remain. */
1549 gcc_assert (REGNO (SET_DEST (x)) < FIRST_PSEUDO_REGISTER);
1550 return;
1551 }
1552 break;
1553
1554 case PLUS:
1555 /* We are interested in (plus (reg) (const_int)). */
1556 if (!REG_P (XEXP (x, 0))
1557 || !CONST_INT_P (XEXP (x, 1)))
1558 break;
1559 offset = XEXP (x, 1);
1560 x = XEXP (x, 0);
1561 /* Fall through. */
1562 case REG:
1563 {
1564 int regno = REGNO (x);
1565 int use_index;
1566 int nregs;
1567
1568 /* No spurious USEs of pseudo registers may remain. */
1569 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1570
1571 nregs = hard_regno_nregs[regno][GET_MODE (x)];
1572
1573 /* We can't substitute into multi-hard-reg uses. */
1574 if (nregs > 1)
1575 {
1576 while (--nregs >= 0)
1577 reg_state[regno + nregs].use_index = -1;
1578 return;
1579 }
1580
1581 /* We may be called to update uses in previously seen insns.
1582 Don't add uses beyond the last store we saw. */
1583 if (ruid < reg_state[regno].store_ruid)
1584 return;
1585
1586 /* If this register is already used in some unknown fashion, we
1587 can't do anything.
1588 If we decrement the index from zero to -1, we can't store more
1589 uses, so this register becomes used in an unknown fashion. */
1590 use_index = --reg_state[regno].use_index;
1591 if (use_index < 0)
1592 return;
1593
1594 if (use_index == RELOAD_COMBINE_MAX_USES - 1)
1595 {
1596 /* This is the first use of this register we have seen since we
1597 marked it as dead. */
1598 reg_state[regno].offset = offset;
1599 reg_state[regno].all_offsets_match = true;
1600 reg_state[regno].use_ruid = ruid;
1601 }
1602 else
1603 {
1604 if (reg_state[regno].use_ruid > ruid)
1605 reg_state[regno].use_ruid = ruid;
1606
1607 if (! rtx_equal_p (offset, reg_state[regno].offset))
1608 reg_state[regno].all_offsets_match = false;
1609 }
1610
1611 reg_state[regno].reg_use[use_index].insn = insn;
1612 reg_state[regno].reg_use[use_index].ruid = ruid;
1613 reg_state[regno].reg_use[use_index].containing_mem = containing_mem;
1614 reg_state[regno].reg_use[use_index].usep = xp;
1615 return;
1616 }
1617
1618 case MEM:
1619 containing_mem = x;
1620 break;
1621
1622 default:
1623 break;
1624 }
1625
1626 /* Recursively process the components of X. */
1627 fmt = GET_RTX_FORMAT (code);
1628 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1629 {
1630 if (fmt[i] == 'e')
1631 reload_combine_note_use (&XEXP (x, i), insn, ruid, containing_mem);
1632 else if (fmt[i] == 'E')
1633 {
1634 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1635 reload_combine_note_use (&XVECEXP (x, i, j), insn, ruid,
1636 containing_mem);
1637 }
1638 }
1639 }
1640 \f
1641 /* See if we can reduce the cost of a constant by replacing a move
1642 with an add. We track situations in which a register is set to a
1643 constant or to a register plus a constant. */
1644 /* We cannot do our optimization across labels. Invalidating all the
1645 information about register contents we have would be costly, so we
1646 use move2add_last_label_luid to note where the label is and then
1647 later disable any optimization that would cross it.
1648 reg_offset[n] / reg_base_reg[n] / reg_symbol_ref[n] / reg_mode[n]
1649 are only valid if reg_set_luid[n] is greater than
1650 move2add_last_label_luid.
1651 For a set that established a new (potential) base register with
1652 non-constant value, we use move2add_luid from the place where the
1653 setting insn is encountered; registers based off that base then
1654 get the same reg_set_luid. Constants all get
1655 move2add_last_label_luid + 1 as their reg_set_luid. */
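/* An illustrative example of the transformation: when loading a large
   constant is expensive,

       (set (reg:SI 1) (const_int 0x20000))
       ...
       (set (reg:SI 1) (const_int 0x20004))

   the second load can be turned into
   (set (reg:SI 1) (plus:SI (reg:SI 1) (const_int 4))), typically a
   cheaper add-immediate.  */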
1656 static int reg_set_luid[FIRST_PSEUDO_REGISTER];
1657
1658 /* If reg_base_reg[n] is negative, register n has been set to
1659 reg_offset[n] or reg_symbol_ref[n] + reg_offset[n] in mode reg_mode[n].
1660 If reg_base_reg[n] is non-negative, register n has been set to the
1661 sum of reg_offset[n] and the value of register reg_base_reg[n]
1662 before reg_set_luid[n], calculated in mode reg_mode[n] .
1663 For multi-hard-register registers, all but the first one are
1664 recorded as BLKmode in reg_mode. Setting reg_mode to VOIDmode
1665 marks it as invalid. */
1666 static HOST_WIDE_INT reg_offset[FIRST_PSEUDO_REGISTER];
1667 static int reg_base_reg[FIRST_PSEUDO_REGISTER];
1668 static rtx reg_symbol_ref[FIRST_PSEUDO_REGISTER];
1669 static enum machine_mode reg_mode[FIRST_PSEUDO_REGISTER];
1670
1671 /* move2add_luid is linearly increased while scanning the instructions
1672 from first to last. It is used to set reg_set_luid in
1673 reload_cse_move2add and move2add_note_store. */
1674 static int move2add_luid;
1675
1676 /* move2add_last_label_luid is set whenever a label is found. Labels
1677 invalidate all previously collected reg_offset data. */
1678 static int move2add_last_label_luid;
1679
1680 /* ??? We don't know how zero / sign extension is handled, hence we
1681 can't go from a narrower to a wider mode. */
1682 #define MODES_OK_FOR_MOVE2ADD(OUTMODE, INMODE) \
1683 (GET_MODE_SIZE (OUTMODE) == GET_MODE_SIZE (INMODE) \
1684 || (GET_MODE_SIZE (OUTMODE) <= GET_MODE_SIZE (INMODE) \
1685 && TRULY_NOOP_TRUNCATION_MODES_P (OUTMODE, INMODE)))
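/* For example, where truncation is a no-op, a value recorded in SImode is
   still usable in QImode, since its low part is unchanged; but a QImode
   value cannot be reused in SImode, because the upper bits are unknown.  */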
1686
1687 /* Record that REG is being set to a value with the mode of REG. */
1688
1689 static void
1690 move2add_record_mode (rtx reg)
1691 {
1692 int regno, nregs;
1693 enum machine_mode mode = GET_MODE (reg);
1694
1695 if (GET_CODE (reg) == SUBREG)
1696 {
1697 regno = subreg_regno (reg);
1698 nregs = subreg_nregs (reg);
1699 }
1700 else if (REG_P (reg))
1701 {
1702 regno = REGNO (reg);
1703 nregs = hard_regno_nregs[regno][mode];
1704 }
1705 else
1706 gcc_unreachable ();
1707 for (int i = nregs - 1; i > 0; i--)
1708 reg_mode[regno + i] = BLKmode;
1709 reg_mode[regno] = mode;
1710 }
1711
1712 /* Record that REG is being set to the sum of SYM and OFF. */
1713
1714 static void
1715 move2add_record_sym_value (rtx reg, rtx sym, rtx off)
1716 {
1717 int regno = REGNO (reg);
1718
1719 move2add_record_mode (reg);
1720 reg_set_luid[regno] = move2add_luid;
1721 reg_base_reg[regno] = -1;
1722 reg_symbol_ref[regno] = sym;
1723 reg_offset[regno] = INTVAL (off);
1724 }
1725
1726 /* Check if REGNO contains a valid value in MODE. */
1727
1728 static bool
1729 move2add_valid_value_p (int regno, enum machine_mode mode)
1730 {
1731 if (reg_set_luid[regno] <= move2add_last_label_luid)
1732 return false;
1733
1734 if (mode != reg_mode[regno])
1735 {
1736 if (!MODES_OK_FOR_MOVE2ADD (mode, reg_mode[regno]))
1737 return false;
1738 /* The value loaded into regno in reg_mode[regno] is also valid in
1739 mode after truncation only if (REG:mode regno) is the lowpart of
1740 (REG:reg_mode[regno] regno). Now, for big endian, the starting
1741 regno of the lowpart might be different. */
1742 int s_off = subreg_lowpart_offset (mode, reg_mode[regno]);
1743 s_off = subreg_regno_offset (regno, reg_mode[regno], s_off, mode);
1744 if (s_off != 0)
1745 /* We could in principle adjust regno, check reg_mode[regno] to be
1746 BLKmode, and return s_off to the caller (vs. -1 for failure),
1747 but we currently have no callers that could make use of this
1748 information. */
1749 return false;
1750 }
1751
1752 for (int i = hard_regno_nregs[regno][mode] - 1; i > 0; i--)
1753 if (reg_mode[regno + i] != BLKmode)
1754 return false;
1755 return true;
1756 }
1757
1758 /* This function is called with INSN that sets REG to (SYM + OFF),
1759 while REG is known to already have value (SYM + offset).
1760 This function tries to change INSN into an add instruction
1761 (set (REG) (plus (REG) (OFF - offset))) using the known value.
1762 It also updates the information about REG's known value.
1763 Return true if we made a change. */
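/* For instance (illustrative): if REG is known to hold SYM + 4 and INSN
   sets REG to SYM + 12, INSN can be rewritten as
   (set (reg) (plus (reg) (const_int 8))).  */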
1764
1765 static bool
1766 move2add_use_add2_insn (rtx reg, rtx sym, rtx off, rtx insn)
1767 {
1768 rtx pat = PATTERN (insn);
1769 rtx src = SET_SRC (pat);
1770 int regno = REGNO (reg);
1771 rtx new_src = gen_int_mode (INTVAL (off) - reg_offset[regno],
1772 GET_MODE (reg));
1773 bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
1774 bool changed = false;
1775
1776 /* (set (reg) (plus (reg) (const_int 0))) is not canonical;
1777 use (set (reg) (reg)) instead.
1778 We don't delete this insn, nor do we convert it into a
1779 note, to avoid losing register notes or the return
1780 value flag. jump2 already knows how to get rid of
1781 no-op moves. */
1782 if (new_src == const0_rtx)
1783 {
1784 /* If the constants are different, this set performs a
1785 truncation that would be discarded if we turned it
1786 into (set (reg) (reg)). Maybe we should try a
1787 truncMN pattern? */
1788 if (INTVAL (off) == reg_offset[regno])
1789 changed = validate_change (insn, &SET_SRC (pat), reg, 0);
1790 }
1791 else
1792 {
1793 struct full_rtx_costs oldcst, newcst;
1794 rtx tem = gen_rtx_PLUS (GET_MODE (reg), reg, new_src);
1795
1796 get_full_set_rtx_cost (pat, &oldcst);
1797 SET_SRC (pat) = tem;
1798 get_full_set_rtx_cost (pat, &newcst);
1799 SET_SRC (pat) = src;
1800
1801 if (costs_lt_p (&newcst, &oldcst, speed)
1802 && have_add2_insn (reg, new_src))
1803 changed = validate_change (insn, &SET_SRC (pat), tem, 0);
1804 else if (sym == NULL_RTX && GET_MODE (reg) != BImode)
1805 {
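/* A full-width add is too expensive or unavailable.  If the known
   value and the wanted constant agree on all bits above some
   narrower mode, it suffices to rewrite just the low part, e.g.
   (set (strict_low_part (reg:HI ...)) (const_int B)) when A and B
   differ only in their low 16 bits.  Try the narrowest integer
   mode first and widen until the change is accepted.  */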
1806 enum machine_mode narrow_mode;
1807 for (narrow_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
1808 narrow_mode != VOIDmode
1809 && narrow_mode != GET_MODE (reg);
1810 narrow_mode = GET_MODE_WIDER_MODE (narrow_mode))
1811 {
1812 if (have_insn_for (STRICT_LOW_PART, narrow_mode)
1813 && ((reg_offset[regno] & ~GET_MODE_MASK (narrow_mode))
1814 == (INTVAL (off) & ~GET_MODE_MASK (narrow_mode))))
1815 {
1816 rtx narrow_reg = gen_lowpart_common (narrow_mode, reg);
1817 rtx narrow_src = gen_int_mode (INTVAL (off),
1818 narrow_mode);
1819 rtx new_set
1820 = gen_rtx_SET (VOIDmode,
1821 gen_rtx_STRICT_LOW_PART (VOIDmode,
1822 narrow_reg),
1823 narrow_src);
1824 changed = validate_change (insn, &PATTERN (insn),
1825 new_set, 0);
1826 if (changed)
1827 break;
1828 }
1829 }
1830 }
1831 }
1832 move2add_record_sym_value (reg, sym, off);
1833 return changed;
1834 }
1835
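/* An illustrative run of move2add_use_add2_insn: suppose reg r3 is
   known to hold the constant 4 (sym == NULL_RTX) and INSN is
   (set (reg:SI r3) (const_int 7)).  We cost
   (set (reg:SI r3) (plus:SI (reg:SI r3) (const_int 3)))
   against the original move and keep whichever is cheaper.  Had r3
   already been known to hold 7, INSN would instead become the no-op
   move (set (reg r3) (reg r3)) for jump2 to delete.  */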
1836
1837 /* This function is called with INSN that sets REG to (SYM + OFF),
1838 but REG doesn't yet have a known value of that form. This
1839 function tries to find another register that is already known
1840 to hold (SYM + offset) and, if one is found, changes INSN into
1841 an add instruction
1842 (set (REG) (plus (the found register) (OFF - offset))).
1843 It also updates the information about REG's known value.
1844 Return true iff we made a change. */
1845
1846 static bool
1847 move2add_use_add3_insn (rtx reg, rtx sym, rtx off, rtx insn)
1848 {
1849 rtx pat = PATTERN (insn);
1850 rtx src = SET_SRC (pat);
1851 int regno = REGNO (reg);
1852 int min_regno = 0;
1853 bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
1854 int i;
1855 bool changed = false;
1856 struct full_rtx_costs oldcst, newcst, mincst;
1857 rtx plus_expr;
1858
1859 init_costs_to_max (&mincst);
1860 get_full_set_rtx_cost (pat, &oldcst);
1861
1862 plus_expr = gen_rtx_PLUS (GET_MODE (reg), reg, const0_rtx);
1863 SET_SRC (pat) = plus_expr;
1864
1865 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1866 if (move2add_valid_value_p (i, GET_MODE (reg))
1867 && reg_base_reg[i] < 0
1868 && reg_symbol_ref[i] != NULL_RTX
1869 && rtx_equal_p (sym, reg_symbol_ref[i]))
1870 {
1871 rtx new_src = gen_int_mode (INTVAL (off) - reg_offset[i],
1872 GET_MODE (reg));
1873 /* (set (reg) (plus (reg) (const_int 0))) is not canonical;
1874 use (set (reg) (reg)) instead.
1875 We don't delete this insn, nor do we convert it into a
1876 note, to avoid losing register notes or the return
1877 value flag. jump2 already knows how to get rid of
1878 no-op moves. */
1879 if (new_src == const0_rtx)
1880 {
1881 init_costs_to_zero (&mincst);
1882 min_regno = i;
1883 break;
1884 }
1885 else
1886 {
1887 XEXP (plus_expr, 1) = new_src;
1888 get_full_set_rtx_cost (pat, &newcst);
1889
1890 if (costs_lt_p (&newcst, &mincst, speed))
1891 {
1892 mincst = newcst;
1893 min_regno = i;
1894 }
1895 }
1896 }
1897 SET_SRC (pat) = src;
1898
1899 if (costs_lt_p (&mincst, &oldcst, speed))
1900 {
1901 rtx tem;
1902
1903 tem = gen_rtx_REG (GET_MODE (reg), min_regno);
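/* If the scan above ended in a break, i == min_regno and the found
   register's known value already matches (SYM + OFF), so a plain
   register-to-register move suffices; otherwise add the residual
   offset.  */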
1904 if (i != min_regno)
1905 {
1906 rtx new_src = gen_int_mode (INTVAL (off) - reg_offset[min_regno],
1907 GET_MODE (reg));
1908 tem = gen_rtx_PLUS (GET_MODE (reg), tem, new_src);
1909 }
1910 if (validate_change (insn, &SET_SRC (pat), tem, 0))
1911 changed = true;
1912 }
1913 reg_set_luid[regno] = move2add_luid;
1914 move2add_record_sym_value (reg, sym, off);
1915 return changed;
1916 }
1917
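/* An illustrative run of move2add_use_add3_insn: suppose INSN is
   (set (reg:SI r2) (const (plus (symbol_ref "x") (const_int 12))))
   and the scan finds reg r1 known to hold "x" + 4.  We then try
   (set (reg:SI r2) (plus:SI (reg:SI r1) (const_int 8))), keeping
   the change only if it costs less than the original symbolic
   load.  */
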
1918 /* Convert move insns with constant inputs to additions if they are cheaper.
1919 Return true if any changes were made. */
1920 static bool
1921 reload_cse_move2add (rtx first)
1922 {
1923 int i;
1924 rtx insn;
1925 bool changed = false;
1926
1927 for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
1928 {
1929 reg_set_luid[i] = 0;
1930 reg_offset[i] = 0;
1931 reg_base_reg[i] = 0;
1932 reg_symbol_ref[i] = NULL_RTX;
1933 reg_mode[i] = VOIDmode;
1934 }
1935
1936 move2add_last_label_luid = 0;
1937 move2add_luid = 2;
1938 for (insn = first; insn; insn = NEXT_INSN (insn), move2add_luid++)
1939 {
1940 rtx pat, note;
1941
1942 if (LABEL_P (insn))
1943 {
1944 move2add_last_label_luid = move2add_luid;
1945 /* We're going to increment move2add_luid twice after a
1946 label, so that we can use move2add_last_label_luid + 1 as
1947 the luid for constants. */
1948 move2add_luid++;
1949 continue;
1950 }
1951 if (! INSN_P (insn))
1952 continue;
1953 pat = PATTERN (insn);
1954 /* For simplicity, we only perform this optimization on
1955 straightforward SETs. */
1956 if (GET_CODE (pat) == SET
1957 && REG_P (SET_DEST (pat)))
1958 {
1959 rtx reg = SET_DEST (pat);
1960 int regno = REGNO (reg);
1961 rtx src = SET_SRC (pat);
1962
1963 /* Check if we have valid information on the contents of this
1964 register in the mode of REG. */
1965 if (move2add_valid_value_p (regno, GET_MODE (reg))
1966 && dbg_cnt (cse2_move2add))
1967 {
1968 /* Try to transform (set (REGX) (CONST_INT A))
1969 ...
1970 (set (REGX) (CONST_INT B))
1971 to
1972 (set (REGX) (CONST_INT A))
1973 ...
1974 (set (REGX) (plus (REGX) (CONST_INT B-A)))
1975 or
1976 (set (REGX) (CONST_INT A))
1977 ...
1978 (set (STRICT_LOW_PART (REGX)) (CONST_INT B))
1979 */
1980
1981 if (CONST_INT_P (src)
1982 && reg_base_reg[regno] < 0
1983 && reg_symbol_ref[regno] == NULL_RTX)
1984 {
1985 changed |= move2add_use_add2_insn (reg, NULL_RTX, src, insn);
1986 continue;
1987 }
1988
1989 /* Try to transform (set (REGX) (REGY))
1990 (set (REGX) (PLUS (REGX) (CONST_INT A)))
1991 ...
1992 (set (REGX) (REGY))
1993 (set (REGX) (PLUS (REGX) (CONST_INT B)))
1994 to
1995 (set (REGX) (REGY))
1996 (set (REGX) (PLUS (REGX) (CONST_INT A)))
1997 ...
1998 (set (REGX) (plus (REGX) (CONST_INT B-A))) */
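/* (Illustrative: with A = 8 and B = 20, the second
   (set (REGX) (REGY)) is deleted and the insn that follows it
   becomes (set (REGX) (plus (REGX) (const_int 12))).)  */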
1999 else if (REG_P (src)
2000 && reg_set_luid[regno] == reg_set_luid[REGNO (src)]
2001 && reg_base_reg[regno] == reg_base_reg[REGNO (src)]
2002 && move2add_valid_value_p (REGNO (src), GET_MODE (reg)))
2003 {
2004 rtx next = next_nonnote_nondebug_insn (insn);
2005 rtx set = NULL_RTX;
2006 if (next)
2007 set = single_set (next);
2008 if (set
2009 && SET_DEST (set) == reg
2010 && GET_CODE (SET_SRC (set)) == PLUS
2011 && XEXP (SET_SRC (set), 0) == reg
2012 && CONST_INT_P (XEXP (SET_SRC (set), 1)))
2013 {
2014 rtx src3 = XEXP (SET_SRC (set), 1);
2015 HOST_WIDE_INT added_offset = INTVAL (src3);
2016 HOST_WIDE_INT base_offset = reg_offset[REGNO (src)];
2017 HOST_WIDE_INT regno_offset = reg_offset[regno];
2018 rtx new_src =
2019 gen_int_mode (added_offset
2020 + base_offset
2021 - regno_offset,
2022 GET_MODE (reg));
2023 bool success = false;
2024 bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
2025
2026 if (new_src == const0_rtx)
2027 /* See above for why we create (set (reg) (reg)) here. */
2028 success
2029 = validate_change (next, &SET_SRC (set), reg, 0);
2030 else
2031 {
2032 rtx old_src = SET_SRC (set);
2033 struct full_rtx_costs oldcst, newcst;
2034 rtx tem = gen_rtx_PLUS (GET_MODE (reg), reg, new_src);
2035
2036 get_full_set_rtx_cost (set, &oldcst);
2037 SET_SRC (set) = tem;
2038 get_full_set_src_cost (tem, &newcst);
2039 SET_SRC (set) = old_src;
2040 costs_add_n_insns (&oldcst, 1);
2041
2042 if (costs_lt_p (&newcst, &oldcst, speed)
2043 && have_add2_insn (reg, new_src))
2044 {
2045 rtx newpat = gen_rtx_SET (VOIDmode, reg, tem);
2046 success
2047 = validate_change (next, &PATTERN (next),
2048 newpat, 0);
2049 }
2050 }
2051 if (success)
2052 delete_insn (insn);
2053 changed |= success;
2054 insn = next;
2055 move2add_record_mode (reg);
2056 reg_offset[regno]
2057 = trunc_int_for_mode (added_offset + base_offset,
2058 GET_MODE (reg));
2059 continue;
2060 }
2061 }
2062 }
2063
2064 /* Try to transform
2065 (set (REGX) (CONST (PLUS (SYMBOL_REF) (CONST_INT A))))
2066 ...
2067 (set (REGY) (CONST (PLUS (SYMBOL_REF) (CONST_INT B))))
2068 to
2069 (set (REGX) (CONST (PLUS (SYMBOL_REF) (CONST_INT A))))
2070 ...
2071 (set (REGY) (CONST (PLUS (REGX) (CONST_INT B-A)))) */
2072 if ((GET_CODE (src) == SYMBOL_REF
2073 || (GET_CODE (src) == CONST
2074 && GET_CODE (XEXP (src, 0)) == PLUS
2075 && GET_CODE (XEXP (XEXP (src, 0), 0)) == SYMBOL_REF
2076 && CONST_INT_P (XEXP (XEXP (src, 0), 1))))
2077 && dbg_cnt (cse2_move2add))
2078 {
2079 rtx sym, off;
2080
2081 if (GET_CODE (src) == SYMBOL_REF)
2082 {
2083 sym = src;
2084 off = const0_rtx;
2085 }
2086 else
2087 {
2088 sym = XEXP (XEXP (src, 0), 0);
2089 off = XEXP (XEXP (src, 0), 1);
2090 }
2091
2092 /* If the reg already contains a value that is the sum of
2093 sym and some constant, we can use an add2 insn. */
2094 if (move2add_valid_value_p (regno, GET_MODE (reg))
2095 && reg_base_reg[regno] < 0
2096 && reg_symbol_ref[regno] != NULL_RTX
2097 && rtx_equal_p (sym, reg_symbol_ref[regno]))
2098 changed |= move2add_use_add2_insn (reg, sym, off, insn);
2099
2100 /* Otherwise, we have to find a register whose value is the
2101 sum of sym and some constant. */
2102 else
2103 changed |= move2add_use_add3_insn (reg, sym, off, insn);
2104
2105 continue;
2106 }
2107 }
2108
2109 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2110 {
2111 if (REG_NOTE_KIND (note) == REG_INC
2112 && REG_P (XEXP (note, 0)))
2113 {
2114 /* Reset the information about this register. */
2115 int regno = REGNO (XEXP (note, 0));
2116 if (regno < FIRST_PSEUDO_REGISTER)
2117 {
2118 move2add_record_mode (XEXP (note, 0));
2119 reg_mode[regno] = VOIDmode;
2120 }
2121 }
2122 }
2123 note_stores (PATTERN (insn), move2add_note_store, insn);
2124
2125 /* If INSN is a conditional branch, we try to extract an
2126 implicit set out of it. */
2127 if (any_condjump_p (insn))
2128 {
2129 rtx cnd = fis_get_condition (insn);
2130
2131 if (cnd != NULL_RTX
2132 && GET_CODE (cnd) == NE
2133 && REG_P (XEXP (cnd, 0))
2134 && !reg_set_p (XEXP (cnd, 0), insn)
2135 /* The following two checks, which are also in
2136 move2add_note_store, are intended to reduce the
2137 number of calls to gen_rtx_SET to avoid memory
2138 allocation if possible. */
2139 && SCALAR_INT_MODE_P (GET_MODE (XEXP (cnd, 0)))
2140 && hard_regno_nregs[REGNO (XEXP (cnd, 0))][GET_MODE (XEXP (cnd, 0))] == 1
2141 && CONST_INT_P (XEXP (cnd, 1)))
2142 {
2143 rtx implicit_set =
2144 gen_rtx_SET (VOIDmode, XEXP (cnd, 0), XEXP (cnd, 1));
2145 move2add_note_store (SET_DEST (implicit_set), implicit_set, insn);
2146 }
2147 }
2148
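/* Recording the NE case here is safe for a reason worth spelling
   out: the fall-through path is taken exactly when the comparison
   is false, i.e. when the register equals the constant, and the
   branch-taken path must begin at a label, which resets
   move2add_last_label_luid and so discards the recorded value.  */
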
2149 /* If this is a CALL_INSN, all call-used registers end up with
2150 unknown values. */
2151 if (CALL_P (insn))
2152 {
2153 for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
2154 {
2155 if (call_used_regs[i])
2156 /* Reset the information about this register. */
2157 reg_mode[i] = VOIDmode;
2158 }
2159 }
2160 }
2161 return changed;
2162 }
2163
2164 /* SET is a SET or CLOBBER that sets DST. DATA is the insn which
2165 contains SET.
2166 Update reg_set_luid, reg_offset, reg_base_reg, reg_symbol_ref and reg_mode accordingly.
2167 Called from reload_cse_move2add via note_stores. */
2168
2169 static void
2170 move2add_note_store (rtx dst, const_rtx set, void *data)
2171 {
2172 rtx insn = (rtx) data;
2173 unsigned int regno = 0;
2174 enum machine_mode mode = GET_MODE (dst);
2175
2176 /* Some targets do argument pushes without adding REG_INC notes. */
2177
2178 if (MEM_P (dst))
2179 {
2180 dst = XEXP (dst, 0);
2181 if (GET_CODE (dst) == PRE_INC || GET_CODE (dst) == POST_INC
2182 || GET_CODE (dst) == PRE_DEC || GET_CODE (dst) == POST_DEC)
2183 reg_mode[REGNO (XEXP (dst, 0))] = VOIDmode;
2184 return;
2185 }
2186
2187 if (GET_CODE (dst) == SUBREG)
2188 regno = subreg_regno (dst);
2189 else if (REG_P (dst))
2190 regno = REGNO (dst);
2191 else
2192 return;
2193
2194 if (SCALAR_INT_MODE_P (mode)
2195 && GET_CODE (set) == SET)
2196 {
2197 rtx note, sym = NULL_RTX;
2198 rtx off;
2199
2200 note = find_reg_equal_equiv_note (insn);
2201 if (note && GET_CODE (XEXP (note, 0)) == SYMBOL_REF)
2202 {
2203 sym = XEXP (note, 0);
2204 off = const0_rtx;
2205 }
2206 else if (note && GET_CODE (XEXP (note, 0)) == CONST
2207 && GET_CODE (XEXP (XEXP (note, 0), 0)) == PLUS
2208 && GET_CODE (XEXP (XEXP (XEXP (note, 0), 0), 0)) == SYMBOL_REF
2209 && CONST_INT_P (XEXP (XEXP (XEXP (note, 0), 0), 1)))
2210 {
2211 sym = XEXP (XEXP (XEXP (note, 0), 0), 0);
2212 off = XEXP (XEXP (XEXP (note, 0), 0), 1);
2213 }
2214
2215 if (sym != NULL_RTX)
2216 {
2217 move2add_record_sym_value (dst, sym, off);
2218 return;
2219 }
2220 }
2221
2222 if (SCALAR_INT_MODE_P (mode)
2223 && GET_CODE (set) == SET
2224 && GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
2225 && GET_CODE (SET_DEST (set)) != STRICT_LOW_PART)
2226 {
2227 rtx src = SET_SRC (set);
2228 rtx base_reg;
2229 HOST_WIDE_INT offset;
2230 int base_regno;
2231
2232 switch (GET_CODE (src))
2233 {
2234 case PLUS:
2235 if (REG_P (XEXP (src, 0)))
2236 {
2237 base_reg = XEXP (src, 0);
2238
2239 if (CONST_INT_P (XEXP (src, 1)))
2240 offset = INTVAL (XEXP (src, 1));
2241 else if (REG_P (XEXP (src, 1))
2242 && move2add_valid_value_p (REGNO (XEXP (src, 1)), mode))
2243 {
2244 if (reg_base_reg[REGNO (XEXP (src, 1))] < 0
2245 && reg_symbol_ref[REGNO (XEXP (src, 1))] == NULL_RTX)
2246 offset = reg_offset[REGNO (XEXP (src, 1))];
2247 /* Maybe the first register is known to be a
2248 constant. */
2249 else if (move2add_valid_value_p (REGNO (base_reg), mode)
2250 && reg_base_reg[REGNO (base_reg)] < 0
2251 && reg_symbol_ref[REGNO (base_reg)] == NULL_RTX)
2252 {
2253 offset = reg_offset[REGNO (base_reg)];
2254 base_reg = XEXP (src, 1);
2255 }
2256 else
2257 goto invalidate;
2258 }
2259 else
2260 goto invalidate;
2261
2262 break;
2263 }
2264
2265 goto invalidate;
2266
2267 case REG:
2268 base_reg = src;
2269 offset = 0;
2270 break;
2271
2272 case CONST_INT:
2273 /* Start tracking the register as a constant. */
2274 reg_base_reg[regno] = -1;
2275 reg_symbol_ref[regno] = NULL_RTX;
2276 reg_offset[regno] = INTVAL (SET_SRC (set));
2277 /* We assign the same luid to all registers set to constants. */
2278 reg_set_luid[regno] = move2add_last_label_luid + 1;
2279 move2add_record_mode (dst);
2280 return;
2281
2282 default:
2283 goto invalidate;
2284 }
2285
2286 base_regno = REGNO (base_reg);
2287 /* If information about the base register is not valid, set it
2288 up as a new base register, pretending its value is known
2289 starting from the current insn. */
2290 if (!move2add_valid_value_p (base_regno, mode))
2291 {
2292 reg_base_reg[base_regno] = base_regno;
2293 reg_symbol_ref[base_regno] = NULL_RTX;
2294 reg_offset[base_regno] = 0;
2295 reg_set_luid[base_regno] = move2add_luid;
2296 gcc_assert (GET_MODE (base_reg) == mode);
2297 move2add_record_mode (base_reg);
2298 }
2299
2300 /* Copy base information from our base register. */
2301 reg_set_luid[regno] = reg_set_luid[base_regno];
2302 reg_base_reg[regno] = reg_base_reg[base_regno];
2303 reg_symbol_ref[regno] = reg_symbol_ref[base_regno];
2304
2305 /* Compute the sum of the offsets or constants. */
2306 reg_offset[regno]
2307 = trunc_int_for_mode (offset + reg_offset[base_regno], mode);
2308
2309 move2add_record_mode (dst);
2310 }
2311 else
2312 {
2313 invalidate:
2314 /* Invalidate the contents of the register. */
2315 move2add_record_mode (dst);
2316 reg_mode[regno] = VOIDmode;
2317 }
2318 }
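
/* An illustrative sequence of calls, assuming SImode hard regs:
   (set (reg:SI 2) (const_int 100)) records reg 2 as the constant
   100; a following (set (reg:SI 4) (plus:SI (reg:SI 2)
   (const_int 16))) then records reg 4 as the constant 116, since
   reg 4 copies reg 2's base information (reg_base_reg == -1 marks
   a constant) and the offsets are summed.  */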
2319 \f
2320 static bool
2321 gate_handle_postreload (void)
2322 {
2323 return (optimize > 0 && reload_completed);
2324 }
2325
2326
2327 static unsigned int
2328 rest_of_handle_postreload (void)
2329 {
2330 if (!dbg_cnt (postreload_cse))
2331 return 0;
2332
2333 /* Do a very simple CSE pass over just the hard registers. */
2334 reload_cse_regs (get_insns ());
2335 /* Reload_cse_regs can eliminate potentially-trapping MEMs.
2336 Remove any EH edges associated with them. */
2337 if (cfun->can_throw_non_call_exceptions
2338 && purge_all_dead_edges ())
2339 cleanup_cfg (0);
2340
2341 return 0;
2342 }
2343
2344 namespace {
2345
2346 const pass_data pass_data_postreload_cse =
2347 {
2348 RTL_PASS, /* type */
2349 "postreload", /* name */
2350 OPTGROUP_NONE, /* optinfo_flags */
2351 true, /* has_gate */
2352 true, /* has_execute */
2353 TV_RELOAD_CSE_REGS, /* tv_id */
2354 0, /* properties_required */
2355 0, /* properties_provided */
2356 0, /* properties_destroyed */
2357 0, /* todo_flags_start */
2358 ( TODO_df_finish | TODO_verify_rtl_sharing | 0 ), /* todo_flags_finish */
2359 };
2360
2361 class pass_postreload_cse : public rtl_opt_pass
2362 {
2363 public:
2364 pass_postreload_cse (gcc::context *ctxt)
2365 : rtl_opt_pass (pass_data_postreload_cse, ctxt)
2366 {}
2367
2368 /* opt_pass methods: */
2369 bool gate () { return gate_handle_postreload (); }
2370 unsigned int execute () { return rest_of_handle_postreload (); }
2371
2372 }; // class pass_postreload_cse
2373
2374 } // anon namespace
2375
2376 rtl_opt_pass *
2377 make_pass_postreload_cse (gcc::context *ctxt)
2378 {
2379 return new pass_postreload_cse (ctxt);
2380 }
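
/* A minimal usage sketch, assuming the pass-manager conventions of
   this source tree (passes.def / tree-pass.h): the factory above is
   invoked once per compiler context, and the resulting pass sits in
   the post-register-allocation part of the RTL pipeline, e.g.

     NEXT_PASS (pass_postreload_cse);

   in passes.def.  The gate then keeps it from running at -O0 or
   before reload has completed.  */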