]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/struct-equiv.c
Change copyright header to refer to version 3 of the GNU General Public License and...
[thirdparty/gcc.git] / gcc / struct-equiv.c
1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
/* Try to match two basic blocks - or their ends - for structural equivalence.
   We scan the blocks from their ends backwards, hoping to find a match, i.e.
   insns that are identical, except for certain cases involving registers.  A
   mismatch between register number RX (used in block X) and RY (used in the
   same way in block Y) can be handled in one of the following cases:
29 1. RX and RY are local to their respective blocks; they are set there and
30 die there. If so, they can effectively be ignored.
31 2. RX and RY die in their blocks, but live at the start. If any path
32 gets redirected through X instead of Y, the caller must emit
33 compensation code to move RY to RX. If there are overlapping inputs,
34 the function resolve_input_conflict ensures that this can be done.
35 Information about these registers are tracked in the X_LOCAL, Y_LOCAL,
36 LOCAL_COUNT and LOCAL_RVALUE fields.
37 3. RX and RY live throughout their blocks, including the start and the end.
38 Either RX and RY must be identical, or we have to replace all uses in
39 block X with a new pseudo, which is stored in the INPUT_REG field. The
40 caller can then use block X instead of block Y by copying RY to the new
41 pseudo.
42
43 The main entry point to this file is struct_equiv_block_eq. This function
44 uses a struct equiv_info to accept some of its inputs, to keep track of its
45 internal state, to pass down to its helper functions, and to communicate
46 some of the results back to the caller.
47
48 Most scans will result in a failure to match a sufficient number of insns
49 to make any optimization worth while, therefore the process is geared more
50 to quick scanning rather than the ability to exactly backtrack when we
51 find a mismatch. The information gathered is still meaningful to make a
52 preliminary decision if we want to do an optimization, we might only
53 slightly overestimate the number of matchable insns, and underestimate
   the number of inputs and miss an input conflict.  Sufficient information
55 is gathered so that when we make another pass, we won't have to backtrack
56 at the same point.
57 Another issue is that information in memory attributes and/or REG_NOTES
58 might have to be merged or discarded to make a valid match. We don't want
59 to discard such information when we are not certain that we want to merge
60 the two (partial) blocks.
61 For these reasons, struct_equiv_block_eq has to be called first with the
62 STRUCT_EQUIV_START bit set in the mode parameter. This will calculate the
63 number of matched insns and the number and types of inputs. If the
64 need_rerun field is set, the results are only tentative, and the caller
65 has to call again with STRUCT_EQUIV_RERUN till need_rerun is false in
66 order to get a reliable match.
67 To install the changes necessary for the match, the function has to be
68 called again with STRUCT_EQUIV_FINAL.
69
70 While scanning an insn, we process first all the SET_DESTs, then the
71 SET_SRCes, then the REG_NOTES, in order to keep the register liveness
72 information consistent.
73 If we were to mix up the order for sources / destinations in an insn where
74 a source is also a destination, we'd end up being mistaken to think that
75 the register is not live in the preceding insn. */
76
77 #include "config.h"
78 #include "system.h"
79 #include "coretypes.h"
80 #include "tm.h"
81 #include "rtl.h"
82 #include "regs.h"
83 #include "output.h"
84 #include "insn-config.h"
85 #include "flags.h"
86 #include "recog.h"
87 #include "tm_p.h"
88 #include "target.h"
89 #include "emit-rtl.h"
90 #include "reload.h"
91 #include "df.h"
92
93 static void merge_memattrs (rtx, rtx);
94 static bool set_dest_equiv_p (rtx x, rtx y, struct equiv_info *info);
95 static bool set_dest_addr_equiv_p (rtx x, rtx y, struct equiv_info *info);
96 static void find_dying_inputs (struct equiv_info *info);
97 static bool resolve_input_conflict (struct equiv_info *info);
98
99 /* After reload, some moves, as indicated by SECONDARY_RELOAD_CLASS and
100 SECONDARY_MEMORY_NEEDED, cannot be done directly. For our purposes, we
101 consider them impossible to generate after reload (even though some
102 might be synthesized when you throw enough code at them).
103 Since we don't know while processing a cross-jump if a local register
104 that is currently live will eventually be live and thus be an input,
105 we keep track of potential inputs that would require an impossible move
106 by using a prohibitively high cost for them.
107 This number, multiplied with the larger of STRUCT_EQUIV_MAX_LOCAL and
108 FIRST_PSEUDO_REGISTER, must fit in the input_cost field of
109 struct equiv_info. */
110 #define IMPOSSIBLE_MOVE_FACTOR 20000
111
112 \f
113
114 /* Removes the memory attributes of MEM expression
115 if they are not equal. */
116
117 void
118 merge_memattrs (rtx x, rtx y)
119 {
120 int i;
121 int j;
122 enum rtx_code code;
123 const char *fmt;
124
125 if (x == y)
126 return;
127 if (x == 0 || y == 0)
128 return;
129
130 code = GET_CODE (x);
131
132 if (code != GET_CODE (y))
133 return;
134
135 if (GET_MODE (x) != GET_MODE (y))
136 return;
137
138 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
139 {
140 if (! MEM_ATTRS (x))
141 MEM_ATTRS (y) = 0;
142 else if (! MEM_ATTRS (y))
143 MEM_ATTRS (x) = 0;
144 else
145 {
146 rtx mem_size;
147
148 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
149 {
150 set_mem_alias_set (x, 0);
151 set_mem_alias_set (y, 0);
152 }
153
154 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
155 {
156 set_mem_expr (x, 0);
157 set_mem_expr (y, 0);
158 set_mem_offset (x, 0);
159 set_mem_offset (y, 0);
160 }
161 else if (MEM_OFFSET (x) != MEM_OFFSET (y))
162 {
163 set_mem_offset (x, 0);
164 set_mem_offset (y, 0);
165 }
166
167 if (!MEM_SIZE (x))
168 mem_size = NULL_RTX;
169 else if (!MEM_SIZE (y))
170 mem_size = NULL_RTX;
171 else
172 mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
173 INTVAL (MEM_SIZE (y))));
174 set_mem_size (x, mem_size);
175 set_mem_size (y, mem_size);
176
177 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
178 set_mem_align (y, MEM_ALIGN (x));
179 }
180 }
181
182 fmt = GET_RTX_FORMAT (code);
183 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
184 {
185 switch (fmt[i])
186 {
187 case 'E':
188 /* Two vectors must have the same length. */
189 if (XVECLEN (x, i) != XVECLEN (y, i))
190 return;
191
192 for (j = 0; j < XVECLEN (x, i); j++)
193 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
194
195 break;
196
197 case 'e':
198 merge_memattrs (XEXP (x, i), XEXP (y, i));
199 }
200 }
201 return;
202 }
203
204 /* In SET, assign the bit for the register number of REG the value VALUE.
205 If REG is a hard register, do so for all its constituent registers.
206 Return the number of registers that have become included (as a positive
207 number) or excluded (as a negative number). */
208 static int
209 assign_reg_reg_set (regset set, rtx reg, int value)
210 {
211 unsigned regno = REGNO (reg);
212 int nregs, i, old;
213
214 if (regno >= FIRST_PSEUDO_REGISTER)
215 {
216 gcc_assert (!reload_completed);
217 nregs = 1;
218 }
219 else
220 nregs = hard_regno_nregs[regno][GET_MODE (reg)];
221 for (old = 0, i = nregs; --i >= 0; regno++)
222 {
223 if ((value != 0) == REGNO_REG_SET_P (set, regno))
224 continue;
225 if (value)
226 old++, SET_REGNO_REG_SET (set, regno);
227 else
228 old--, CLEAR_REGNO_REG_SET (set, regno);
229 }
230 return old;
231 }
232
/* Record state about current inputs / local registers / liveness
   in *P, so that struct_equiv_restore_checkpoint can later roll the
   scan back to this point.  */
static inline void
struct_equiv_make_checkpoint (struct struct_equiv_checkpoint *p,
			      struct equiv_info *info)
{
  /* All mutable scan state lives in INFO->cur, so a structure copy
     captures everything.  */
  *p = info->cur;
}
241
/* Call struct_equiv_make_checkpoint (P, INFO) if the current partial block
   is suitable to split off - i.e. there is no dangling cc0 user - and
   if the current cost of the common instructions, minus the cost for
   setting up the inputs, is higher than what has been recorded before
   in CHECKPOINT[N].  Also, if we do so, confirm or cancel any pending
   changes.  */
static void
struct_equiv_improve_checkpoint (struct struct_equiv_checkpoint *p,
				 struct equiv_info *info)
{
#ifdef HAVE_cc0
  /* Splitting between a cc0 user and its setter would leave a dangling
     cc0 reference, so refuse to checkpoint here in that case.  */
  if (reg_mentioned_p (cc0_rtx, info->cur.x_start)
      && !sets_cc0_p (info->cur.x_start))
    return;
#endif
  /* An input that would require an impossible move (see
     IMPOSSIBLE_MOVE_FACTOR) makes this split point worthless.  */
  if (info->cur.input_count >= IMPOSSIBLE_MOVE_FACTOR)
    return;
  /* When an input cost is known, accept the new point if the insns
     gained since *P outweigh the cost of the inputs gained since *P;
     otherwise only accept strictly more insns with zero inputs.  */
  if (info->input_cost >= 0
      ? (COSTS_N_INSNS(info->cur.ninsns - p->ninsns)
	 > info->input_cost * (info->cur.input_count - p->input_count))
      : info->cur.ninsns > p->ninsns && !info->cur.input_count)
    {
      if (info->check_input_conflict && ! resolve_input_conflict (info))
	return;
      /* We have a profitable set of changes.  If this is the final pass,
	 commit them now.  Otherwise, we don't know yet if we can make any
	 change, so put the old code back for now.  */
      if (info->mode & STRUCT_EQUIV_FINAL)
	confirm_change_group ();
      else
	cancel_changes (0);
      struct_equiv_make_checkpoint (p, info);
    }
}
276
/* Restore state about current inputs / local registers / liveness
   from P.  */
static void
struct_equiv_restore_checkpoint (struct struct_equiv_checkpoint *p,
				 struct equiv_info *info)
{
  info->cur.ninsns = p->ninsns;
  info->cur.x_start = p->x_start;
  info->cur.y_start = p->y_start;
  info->cur.input_count = p->input_count;
  info->cur.input_valid = p->input_valid;
  /* Discard the local register pairs registered after the checkpoint,
     undoing the version bump each registration made.  */
  while (info->cur.local_count > p->local_count)
    {
      info->cur.local_count--;
      info->cur.version--;
      /* If the pair was also marked live, clear the liveness bits and
	 undo the additional version bump from note_local_live.  */
      if (REGNO_REG_SET_P (info->x_local_live,
			   REGNO (info->x_local[info->cur.local_count])))
	{
	  assign_reg_reg_set (info->x_local_live,
			      info->x_local[info->cur.local_count], 0);
	  assign_reg_reg_set (info->y_local_live,
			      info->y_local[info->cur.local_count], 0);
	  info->cur.version--;
	}
    }
  /* If the version count doesn't return to the checkpointed value, some
     state change couldn't be undone exactly; request another pass.  */
  if (info->cur.version != p->version)
    info->need_rerun = true;
}
305
306
307 /* Update register liveness to reflect that X is now life (if rvalue is
308 nonzero) or dead (if rvalue is zero) in INFO->x_block, and likewise Y
309 in INFO->y_block. Return the number of registers the liveness of which
310 changed in each block (as a negative number if registers became dead). */
311 static int
312 note_local_live (struct equiv_info *info, rtx x, rtx y, int rvalue)
313 {
314 unsigned x_regno = REGNO (x);
315 unsigned y_regno = REGNO (y);
316 int x_nominal_nregs = (x_regno >= FIRST_PSEUDO_REGISTER
317 ? 1 : hard_regno_nregs[x_regno][GET_MODE (x)]);
318 int y_nominal_nregs = (y_regno >= FIRST_PSEUDO_REGISTER
319 ? 1 : hard_regno_nregs[y_regno][GET_MODE (y)]);
320 int x_change = assign_reg_reg_set (info->x_local_live, x, rvalue);
321 int y_change = assign_reg_reg_set (info->y_local_live, y, rvalue);
322
323 gcc_assert (x_nominal_nregs && y_nominal_nregs);
324 gcc_assert (x_change * y_nominal_nregs == y_change * x_nominal_nregs);
325 if (y_change)
326 {
327 if (reload_completed)
328 {
329 unsigned x_regno ATTRIBUTE_UNUSED = REGNO (x);
330 unsigned y_regno ATTRIBUTE_UNUSED = REGNO (y);
331 enum machine_mode x_mode = GET_MODE (x);
332
333 if (secondary_reload_class (0, REGNO_REG_CLASS (y_regno), x_mode, x)
334 != NO_REGS
335 #ifdef SECONDARY_MEMORY_NEEDED
336 || SECONDARY_MEMORY_NEEDED (REGNO_REG_CLASS (y_regno),
337 REGNO_REG_CLASS (x_regno), x_mode)
338 #endif
339 )
340 y_change *= IMPOSSIBLE_MOVE_FACTOR;
341 }
342 info->cur.input_count += y_change;
343 info->cur.version++;
344 }
345 return x_change;
346 }
347
/* Check if *XP is equivalent to Y.  Until an unreconcilable difference is
   found, use in-group changes with validate_change on *XP to make register
   assignments agree.  It is the (not necessarily direct) callers
   responsibility to verify / confirm / cancel these changes, as appropriate.
   RVALUE indicates if the processed piece of rtl is used as a destination, in
   which case we can't have different registers being an input.  Returns
   nonzero if the two blocks have been identified as equivalent, zero otherwise.
   RVALUE == 0: destination
   RVALUE == 1: source
   RVALUE == -1: source, ignore SET_DEST of SET / clobber.  */
bool
rtx_equiv_p (rtx *xp, rtx y, int rvalue, struct equiv_info *info)
{
  rtx x = *xp;
  enum rtx_code code;
  int length;
  const char *format;
  int i;

  if (!y || !x)
    return x == y;
  code = GET_CODE (y);
  /* Pointer equality suffices for non-REG rtl; REGs need the liveness /
     local-pair bookkeeping below even when identical.  */
  if (code != REG && x == y)
    return true;
  if (GET_CODE (x) != code
      || GET_MODE (x) != GET_MODE (y))
    return false;

  /* ??? could extend to allow CONST_INT inputs.  */
  switch (code)
    {
    case REG:
      {
	unsigned x_regno = REGNO (x);
	unsigned y_regno = REGNO (y);
	int x_common_live, y_common_live;

	if (reload_completed
	    && (x_regno >= FIRST_PSEUDO_REGISTER
		|| y_regno >= FIRST_PSEUDO_REGISTER))
	  {
	    /* We should only see this in REG_NOTEs.  */
	    gcc_assert (!info->live_update);
	    /* Returning false will cause us to remove the notes.  */
	    return false;
	  }
#ifdef STACK_REGS
	/* After reg-stack, can only accept literal matches of stack regs.  */
	if (info->mode & CLEANUP_POST_REGSTACK
	    && (IN_RANGE (x_regno, FIRST_STACK_REG, LAST_STACK_REG)
		|| IN_RANGE (y_regno, FIRST_STACK_REG, LAST_STACK_REG)))
	  return x_regno == y_regno;
#endif

	/* If the register is a locally live one in one block, the
	   corresponding one must be locally live in the other, too, and
	   match of identical regnos doesn't apply.  */
	if (REGNO_REG_SET_P (info->x_local_live, x_regno))
	  {
	    if (!REGNO_REG_SET_P (info->y_local_live, y_regno))
	      return false;
	  }
	else if (REGNO_REG_SET_P (info->y_local_live, y_regno))
	  return false;
	else if (x_regno == y_regno)
	  {
	    /* A destination that overlaps the already-established input
	       pair would clobber the compensation copy.  */
	    if (!rvalue && info->cur.input_valid
		&& (reg_overlap_mentioned_p (x, info->x_input)
		    || reg_overlap_mentioned_p (x, info->y_input)))
	      return false;

	    /* Update liveness information.  */
	    if (info->live_update
		&& assign_reg_reg_set (info->common_live, x, rvalue))
	      info->cur.version++;

	    return true;
	  }

	x_common_live = REGNO_REG_SET_P (info->common_live, x_regno);
	y_common_live = REGNO_REG_SET_P (info->common_live, y_regno);
	if (x_common_live != y_common_live)
	  return false;
	else if (x_common_live)
	  {
	    /* Case 3 of the file header comment: both live throughout.
	       Only a single such input pair is supported (x_input /
	       y_input), replaced by a fresh pseudo (input_reg).  */
	    if (! rvalue || info->input_cost < 0 || reload_completed)
	      return false;
	    /* If info->live_update is not set, we are processing notes.
	       We then allow a match with x_input / y_input found in a
	       previous pass.  */
	    if (info->live_update && !info->cur.input_valid)
	      {
		info->cur.input_valid = true;
		info->x_input = x;
		info->y_input = y;
		info->cur.input_count += optimize_size ? 2 : 1;
		if (info->input_reg
		    && GET_MODE (info->input_reg) != GET_MODE (info->x_input))
		  info->input_reg = NULL_RTX;
		if (!info->input_reg)
		  info->input_reg = gen_reg_rtx (GET_MODE (info->x_input));
	      }
	    else if ((info->live_update
		      ? ! info->cur.input_valid : ! info->x_input)
		     || ! rtx_equal_p (x, info->x_input)
		     || ! rtx_equal_p (y, info->y_input))
	      return false;
	    validate_change (info->cur.x_start, xp, info->input_reg, 1);
	  }
	else
	  {
	    int x_nregs = (x_regno >= FIRST_PSEUDO_REGISTER
			   ? 1 : hard_regno_nregs[x_regno][GET_MODE (x)]);
	    int y_nregs = (y_regno >= FIRST_PSEUDO_REGISTER
			   ? 1 : hard_regno_nregs[y_regno][GET_MODE (y)]);
	    int size = GET_MODE_SIZE (GET_MODE (x));
	    enum machine_mode x_mode = GET_MODE (x);
	    unsigned x_regno_i, y_regno_i;
	    int x_nregs_i, y_nregs_i, size_i;
	    int local_count = info->cur.local_count;

	    /* This might be a register local to each block.  See if we have
	       it already registered.  */
	    for (i = local_count - 1; i >= 0; i--)
	      {
		x_regno_i = REGNO (info->x_local[i]);
		x_nregs_i = (x_regno_i >= FIRST_PSEUDO_REGISTER
			     ? 1 : hard_regno_nregs[x_regno_i][GET_MODE (x)]);
		y_regno_i = REGNO (info->y_local[i]);
		y_nregs_i = (y_regno_i >= FIRST_PSEUDO_REGISTER
			     ? 1 : hard_regno_nregs[y_regno_i][GET_MODE (y)]);
		size_i = GET_MODE_SIZE (GET_MODE (info->x_local[i]));

		/* If we have a new pair of registers that is wider than an
		   old pair and enclosing it with matching offsets,
		   remove the old pair.  If we find a matching, wider, old
		   pair, use the old one.  If the width is the same, use the
		   old one if the modes match, but the new if they don't.
		   We don't want to get too fancy with subreg_regno_offset
		   here, so we just test two straightforward cases each.  */
		if (info->live_update
		    && (x_mode != GET_MODE (info->x_local[i])
			? size >= size_i : size > size_i))
		  {
		    /* If the new pair is fully enclosing a matching
		       existing pair, remove the old one.  N.B. because
		       we are removing one entry here, the check below
		       if we have space for a new entry will succeed.  */
		    if ((x_regno <= x_regno_i
			 && x_regno + x_nregs >= x_regno_i + x_nregs_i
			 && x_nregs == y_nregs && x_nregs_i == y_nregs_i
			 && x_regno - x_regno_i == y_regno - y_regno_i)
			|| (x_regno == x_regno_i && y_regno == y_regno_i
			    && x_nregs >= x_nregs_i && y_nregs >= y_nregs_i))
		      {
			info->cur.local_count = --local_count;
			info->x_local[i] = info->x_local[local_count];
			info->y_local[i] = info->y_local[local_count];
			continue;
		      }
		  }
		else
		  {

		    /* If the new pair is fully enclosed within a matching
		       existing pair, succeed.  */
		    if (x_regno >= x_regno_i
			&& x_regno + x_nregs <= x_regno_i + x_nregs_i
			&& x_nregs == y_nregs && x_nregs_i == y_nregs_i
			&& x_regno - x_regno_i == y_regno - y_regno_i)
		      break;
		    if (x_regno == x_regno_i && y_regno == y_regno_i
			&& x_nregs <= x_nregs_i && y_nregs <= y_nregs_i)
		      break;
		  }

		/* Any other overlap causes a match failure.  */
		if (x_regno + x_nregs > x_regno_i
		    && x_regno_i + x_nregs_i > x_regno)
		  return false;
		if (y_regno + y_nregs > y_regno_i
		    && y_regno_i + y_nregs_i > y_regno)
		  return false;
	      }
	    if (i < 0)
	      {
		/* Not found.  Create a new entry if possible.  */
		if (!info->live_update
		    || info->cur.local_count >= STRUCT_EQUIV_MAX_LOCAL)
		  return false;
		info->x_local[info->cur.local_count] = x;
		info->y_local[info->cur.local_count] = y;
		info->cur.local_count++;
		info->cur.version++;
	      }
	    note_local_live (info, x, y, rvalue);
	  }
	return true;
      }
    case SET:
      gcc_assert (rvalue < 0);
      /* Ignore the destinations role as a destination.  Still, we have
	 to consider input registers embedded in the addresses of a MEM.
	 N.B., we process the rvalue aspect of STRICT_LOW_PART /
	 ZERO_EXTEND / SIGN_EXTEND along with their lvalue aspect.  */
      if(!set_dest_addr_equiv_p (SET_DEST (x), SET_DEST (y), info))
	return false;
      /* Process source.  */
      return rtx_equiv_p (&SET_SRC (x), SET_SRC (y), 1, info);
    case PRE_MODIFY:
      /* Process destination.  */
      if (!rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 0, info))
	return false;
      /* Process source.  */
      return rtx_equiv_p (&XEXP (x, 1), XEXP (y, 1), 1, info);
    case POST_MODIFY:
      {
	rtx x_dest0, x_dest1;

	/* Process destination.  */
	x_dest0 = XEXP (x, 0);
	gcc_assert (REG_P (x_dest0));
	if (!rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 0, info))
	  return false;
	x_dest1 = XEXP (x, 0);
	/* validate_change might have changed the destination.  Put it back
	   so that we can do a proper match for its role as an input.  */
	XEXP (x, 0) = x_dest0;
	if (!rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 1, info))
	  return false;
	gcc_assert (x_dest1 == XEXP (x, 0));
	/* Process source.  */
	return rtx_equiv_p (&XEXP (x, 1), XEXP (y, 1), 1, info);
      }
    case CLOBBER:
      gcc_assert (rvalue < 0);
      return true;
      /* Some special forms are also rvalues when they appear in lvalue
	 positions.  However, we must not try to match a register after we
	 have already altered it with validate_change, consider the rvalue
	 aspect while we process the lvalue.  */
    case STRICT_LOW_PART:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      {
	rtx x_inner, y_inner;
	enum rtx_code code;
	int change;

	if (rvalue)
	  break;
	x_inner = XEXP (x, 0);
	y_inner = XEXP (y, 0);
	if (GET_MODE (x_inner) != GET_MODE (y_inner))
	  return false;
	code = GET_CODE (x_inner);
	if (code != GET_CODE (y_inner))
	  return false;
	/* The address of a MEM is an input that will be processed during
	   rvalue == -1 processing.  */
	if (code == SUBREG)
	  {
	    if (SUBREG_BYTE (x_inner) != SUBREG_BYTE (y_inner))
	      return false;
	    x = x_inner;
	    x_inner = SUBREG_REG (x_inner);
	    y_inner = SUBREG_REG (y_inner);
	    if (GET_MODE (x_inner) != GET_MODE (y_inner))
	      return false;
	    code = GET_CODE (x_inner);
	    if (code != GET_CODE (y_inner))
	      return false;
	  }
	if (code == MEM)
	  return true;
	gcc_assert (code == REG);
	if (! rtx_equiv_p (&XEXP (x, 0), y_inner, rvalue, info))
	  return false;
	if (REGNO (x_inner) == REGNO (y_inner))
	  {
	    change = assign_reg_reg_set (info->common_live, x_inner, 1);
	    info->cur.version++;
	  }
	else
	  change = note_local_live (info, x_inner, y_inner, 1);
	gcc_assert (change);
	return true;
      }
      /* The AUTO_INC / POST_MODIFY / PRE_MODIFY sets are modelled to take
	 place during input processing, however, that is benign, since they
	 are paired with reads.  */
    case MEM:
      return !rvalue || rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), rvalue, info);
    case POST_INC: case POST_DEC: case PRE_INC: case PRE_DEC:
      return (rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 0, info)
	      && rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 1, info));
    case PARALLEL:
      /* If this is a top-level PATTERN PARALLEL, we expect the caller to
	 have handled the SET_DESTs.  A complex or vector PARALLEL can be
	 identified by having a mode.  */
      gcc_assert (rvalue < 0 || GET_MODE (x) != VOIDmode);
      break;
    case LABEL_REF:
      /* Check special tablejump match case.  */
      if (XEXP (y, 0) == info->y_label)
	return (XEXP (x, 0) == info->x_label);
      /* We can't assume nonlocal labels have their following insns yet.  */
      if (LABEL_REF_NONLOCAL_P (x) || LABEL_REF_NONLOCAL_P (y))
	return XEXP (x, 0) == XEXP (y, 0);

      /* Two label-refs are equivalent if they point at labels
	 in the same position in the instruction stream.  */
      return (next_real_insn (XEXP (x, 0))
	      == next_real_insn (XEXP (y, 0)));
    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);
      /* Some rtl is guaranteed to be shared, or unique;  If we didn't match
	 EQ equality above, they aren't the same.  */
    case CONST_INT:
    case CODE_LABEL:
      return false;
    default:
      break;
    }

  /* For commutative operations, the RTX match if the operands match in any
     order.  */
  if (targetm.commutative_p (x, UNKNOWN))
    return ((rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), rvalue, info)
	     && rtx_equiv_p (&XEXP (x, 1), XEXP (y, 1), rvalue, info))
	    || (rtx_equiv_p (&XEXP (x, 0), XEXP (y, 1), rvalue, info)
		&& rtx_equiv_p (&XEXP (x, 1), XEXP (y, 0), rvalue, info)));

  /* Process subexpressions - this is similar to rtx_equal_p.  */
  length = GET_RTX_LENGTH (code);
  format = GET_RTX_FORMAT (code);

  for (i = 0; i < length; ++i)
    {
      switch (format[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return false;
	  break;
	case 'n':
	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return false;
	  break;
	case 'V':
	case 'E':
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return false;
	  if (XVEC (x, i) != 0)
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); ++j)
		{
		  if (! rtx_equiv_p (&XVECEXP (x, i, j), XVECEXP (y, i, j),
				     rvalue, info))
		    return false;
		}
	    }
	  break;
	case 'e':
	  if (! rtx_equiv_p (&XEXP (x, i), XEXP (y, i), rvalue, info))
	    return false;
	  break;
	case 'S':
	case 's':
	  if ((XSTR (x, i) || XSTR (y, i))
	      && (! XSTR (x, i) || ! XSTR (y, i)
		  || strcmp (XSTR (x, i), XSTR (y, i))))
	    return false;
	  break;
	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;
	case '0':
	case 't':
	  break;
	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  gcc_unreachable ();
	}
    }
  return true;
}
739
740 /* Do only the rtx_equiv_p SET_DEST processing for SETs and CLOBBERs.
741 Since we are scanning backwards, this the first step in processing each
742 insn. Return true for success. */
743 static bool
744 set_dest_equiv_p (rtx x, rtx y, struct equiv_info *info)
745 {
746 if (!x || !y)
747 return x == y;
748 if (GET_CODE (x) != GET_CODE (y))
749 return false;
750 else if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
751 return rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 0, info);
752 else if (GET_CODE (x) == PARALLEL)
753 {
754 int j;
755
756 if (XVECLEN (x, 0) != XVECLEN (y, 0))
757 return false;
758 for (j = 0; j < XVECLEN (x, 0); ++j)
759 {
760 rtx xe = XVECEXP (x, 0, j);
761 rtx ye = XVECEXP (y, 0, j);
762
763 if (GET_CODE (xe) != GET_CODE (ye))
764 return false;
765 if ((GET_CODE (xe) == SET || GET_CODE (xe) == CLOBBER)
766 && ! rtx_equiv_p (&XEXP (xe, 0), XEXP (ye, 0), 0, info))
767 return false;
768 }
769 }
770 return true;
771 }
772
773 /* Process MEMs in SET_DEST destinations. We must not process this together
774 with REG SET_DESTs, but must do it separately, lest when we see
775 [(set (reg:SI foo) (bar))
776 (set (mem:SI (reg:SI foo) (baz)))]
777 struct_equiv_block_eq could get confused to assume that (reg:SI foo)
778 is not live before this instruction. */
779 static bool
780 set_dest_addr_equiv_p (rtx x, rtx y, struct equiv_info *info)
781 {
782 enum rtx_code code = GET_CODE (x);
783 int length;
784 const char *format;
785 int i;
786
787 if (code != GET_CODE (y))
788 return false;
789 if (code == MEM)
790 return rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 1, info);
791
792 /* Process subexpressions. */
793 length = GET_RTX_LENGTH (code);
794 format = GET_RTX_FORMAT (code);
795
796 for (i = 0; i < length; ++i)
797 {
798 switch (format[i])
799 {
800 case 'V':
801 case 'E':
802 if (XVECLEN (x, i) != XVECLEN (y, i))
803 return false;
804 if (XVEC (x, i) != 0)
805 {
806 int j;
807 for (j = 0; j < XVECLEN (x, i); ++j)
808 {
809 if (! set_dest_addr_equiv_p (XVECEXP (x, i, j),
810 XVECEXP (y, i, j), info))
811 return false;
812 }
813 }
814 break;
815 case 'e':
816 if (! set_dest_addr_equiv_p (XEXP (x, i), XEXP (y, i), info))
817 return false;
818 break;
819 default:
820 break;
821 }
822 }
823 return true;
824 }
825
/* Check if the set of REG_DEAD notes attached to I1 and I2 allows us to
   go ahead with merging I1 and I2, which otherwise look fine.
   Inputs / local registers for the inputs of I1 and I2 have already been
   set up.  Always returns true when STACK_REGS is not defined.  */
static bool
death_notes_match_p (rtx i1 ATTRIBUTE_UNUSED, rtx i2 ATTRIBUTE_UNUSED,
		     struct equiv_info *info ATTRIBUTE_UNUSED)
{
#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like regs.  */

  if ((info->mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      /* Collect the stack registers that die in I1.  */
      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      /* Collect the stack registers that die in I2, translating each
	 regno through the local register pairing (y_local -> x_local)
	 so that the two sets are directly comparable.  */
      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  {
	    unsigned regno = REGNO (XEXP (note, 0));
	    int i;

	    for (i = info->cur.local_count - 1; i >= 0; i--)
	      if (regno == REGNO (info->y_local[i]))
		{
		  regno = REGNO (info->x_local[i]);
		  break;
		}
	    SET_HARD_REG_BIT (i2_regset, regno);
	  }

      if (!hard_reg_set_equal_p (i1_regset, i2_regset))
	return false;
    }
#endif
  return true;
}
875
/* Return true if I1 and I2 are equivalent and thus can be crossjumped.
   On success, pending validate_change replacements may be left in the
   change group for the caller to confirm or cancel; on failure all
   pending changes are cancelled here.  */

bool
insns_match_p (rtx i1, rtx i2, struct equiv_info *info)
{
  int rvalue_change_start;
  struct struct_equiv_checkpoint before_rvalue_change;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  info->cur.x_start = i1;
  info->cur.y_start = i2;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (CALL_P (i1))
    {
      if (SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)
	  || ! set_dest_equiv_p (PATTERN (i1), PATTERN (i2), info)
	  || ! set_dest_equiv_p (CALL_INSN_FUNCTION_USAGE (i1),
				 CALL_INSN_FUNCTION_USAGE (i2), info)
	  || ! rtx_equiv_p (&CALL_INSN_FUNCTION_USAGE (i1),
			    CALL_INSN_FUNCTION_USAGE (i2), -1, info))
	{
	  cancel_changes (0);
	  return false;
	}
    }
  else if (INSN_P (i1))
    {
      /* Destinations are matched first; see the file header comment on
	 why destination / source ordering matters for liveness.  */
      if (! set_dest_equiv_p (PATTERN (i1), PATTERN (i2), info))
	{
	  cancel_changes (0);
	  return false;
	}
    }
  /* Checkpoint before the rvalue processing so that the REG_EQUAL /
     REG_EQUIV fallback below can retry from a clean state.  */
  rvalue_change_start = num_validated_changes ();
  struct_equiv_make_checkpoint (&before_rvalue_change, info);
  /* Check death_notes_match_p *after* the inputs have been processed,
     so that local inputs will already have been set up.  */
  if (! INSN_P (i1)
      || (!bitmap_bit_p (info->equiv_used, info->cur.ninsns)
	  && rtx_equiv_p (&PATTERN (i1), PATTERN (i2), -1, info)
	  && death_notes_match_p (i1, i2, info)
	  && verify_changes (0)))
    return true;

  /* Do not do EQUIV substitution after reload.  First, we're undoing the
     work of reload_cse.  Second, we may be undoing the work of the post-
     reload splitting pass.  */
  /* ??? Possibly add a new phase switch variable that can be used by
     targets to disallow the troublesome insns after splitting.  */
  if (!reload_completed)
    {
      rtx equiv1, equiv2;

      cancel_changes (rvalue_change_start);
      struct_equiv_restore_checkpoint (&before_rvalue_change, info);

      /* The following code helps take care of G++ cleanups.  */
      equiv1 = find_reg_equal_equiv_note (i1);
      equiv2 = find_reg_equal_equiv_note (i2);
      if (equiv1 && equiv2
	  /* If the equivalences are not to a constant, they may
	     reference pseudos that no longer exist, so we can't
	     use them.  */
	  && (! reload_completed
	      || (CONSTANT_P (XEXP (equiv1, 0))
		  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
	{
	  rtx s1 = single_set (i1);
	  rtx s2 = single_set (i2);

	  if (s1 != 0 && s2 != 0)
	    {
	      /* Substitute the noted equivalent values for the sources
		 and retry the match on the whole patterns.  */
	      validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
	      validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
	      /* Only inspecting the new SET_SRC is not good enough,
		 because there may also be bare USEs in a single_set
		 PARALLEL.  */
	      if (rtx_equiv_p (&PATTERN (i1), PATTERN (i2), -1, info)
		  && death_notes_match_p (i1, i2, info)
		  && verify_changes (0))
		{
		  /* Mark this insn so that we'll use the equivalence in
		     all subsequent passes.  */
		  bitmap_set_bit (info->equiv_used, info->cur.ninsns);
		  return true;
		}
	    }
	}
    }

  cancel_changes (0);
  return false;
}
982
/* Set up mode and register information in INFO.  Return true for success.  */
bool
struct_equiv_init (int mode, struct equiv_info *info)
{
  /* The two blocks must have identical live-out register sets to be
     mergeable; otherwise successors would observe different liveness.  */
  if (!REG_SET_EQUAL_P (DF_LR_OUT (info->x_block),
			DF_LR_OUT (info->y_block)))
    {
#ifdef STACK_REGS
      unsigned rn;

      if (!(mode & CLEANUP_POST_REGSTACK))
	return false;
      /* After reg-stack.  Remove bogus live info about stack regs.  N.B.
	 these regs are not necessarily all dead - we swap random bogosity
	 against constant bogosity.  However, clearing these bits at
	 least makes the regsets comparable.  */
      for (rn = FIRST_STACK_REG; rn <= LAST_STACK_REG; rn++)
	{
	  CLEAR_REGNO_REG_SET (DF_LR_OUT (info->x_block), rn);
	  CLEAR_REGNO_REG_SET (DF_LR_OUT (info->y_block), rn);
	}
      /* Retry the comparison with stack registers masked out.  Note that
	 without STACK_REGS, the `return false' below runs unconditionally
	 whenever the live-out sets differ.  */
      if (!REG_SET_EQUAL_P (DF_LR_OUT (info->x_block),
			    DF_LR_OUT (info->y_block)))
#endif
	return false;
    }
  info->mode = mode;
  /* On the first pass for this block pair, reset the input-register
     tracking and allocate the bitmap recording insns that were matched
     by substituting REG_EQUAL note values.  */
  if (mode & STRUCT_EQUIV_START)
    {
      info->x_input = info->y_input = info->input_reg = NULL_RTX;
      info->equiv_used = ALLOC_REG_SET (&reg_obstack);
      info->check_input_conflict = false;
    }
  info->had_input_conflict = false;
  info->cur.ninsns = info->cur.version = 0;
  info->cur.local_count = info->cur.input_count = 0;
  info->cur.x_start = info->cur.y_start = NULL_RTX;
  info->x_label = info->y_label = NULL_RTX;
  info->need_rerun = false;
  info->live_update = true;
  info->cur.input_valid = false;
  /* Seed the common live set from X's live-out; it was just verified
     (modulo stack regs) to equal Y's live-out.  */
  info->common_live = ALLOC_REG_SET (&reg_obstack);
  info->x_local_live = ALLOC_REG_SET (&reg_obstack);
  info->y_local_live = ALLOC_REG_SET (&reg_obstack);
  COPY_REG_SET (info->common_live, DF_LR_OUT (info->x_block));
  struct_equiv_make_checkpoint (&info->best_match, info);
  return true;
}
1031
1032 /* Insns XI and YI have been matched. Merge memory attributes and reg
1033 notes. */
1034 static void
1035 struct_equiv_merge (rtx xi, rtx yi, struct equiv_info *info)
1036 {
1037 rtx equiv1, equiv2;
1038
1039 merge_memattrs (xi, yi);
1040
1041 /* If the merged insns have different REG_EQUAL notes, then
1042 remove them. */
1043 info->live_update = false;
1044 equiv1 = find_reg_equal_equiv_note (xi);
1045 equiv2 = find_reg_equal_equiv_note (yi);
1046 if (equiv1 && !equiv2)
1047 remove_note (xi, equiv1);
1048 else if (!equiv1 && equiv2)
1049 remove_note (yi, equiv2);
1050 else if (equiv1 && equiv2
1051 && !rtx_equiv_p (&XEXP (equiv1, 0), XEXP (equiv2, 0),
1052 1, info))
1053 {
1054 remove_note (xi, equiv1);
1055 remove_note (yi, equiv2);
1056 }
1057 info->live_update = true;
1058 }
1059
/* Return number of matched insns.
   This function must be called up to three times for a successful cross-jump
   match:
   first to find out which instructions do match.  While trying to match
   another instruction that doesn't match, we destroy information in info
   about the actual inputs.  So if there have been any before the last
   match attempt, we need to call this function again to recompute the
   actual inputs up to the actual start of the matching sequence.
   When we are then satisfied that the cross-jump is worthwhile, we
   call this function a third time to make any changes needed to make the
   sequences match: apply equivalences, remove non-matching
   notes and merge memory attributes.  */
int
struct_equiv_block_eq (int mode, struct equiv_info *info)
{
  rtx x_stop, y_stop;
  rtx xi, yi;
  int i;

  if (mode & STRUCT_EQUIV_START)
    {
      /* Initial run: scan all the way back to the block heads.  */
      x_stop = BB_HEAD (info->x_block);
      y_stop = BB_HEAD (info->y_block);
      if (!x_stop || !y_stop)
	return 0;
    }
  else
    {
      /* Rerun: stop at the previously determined start of the matching
	 sequence (saved before struct_equiv_init resets cur.x_start).  */
      x_stop = info->cur.x_start;
      y_stop = info->cur.y_start;
    }
  if (!struct_equiv_init (mode, info))
    gcc_unreachable ();

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  xi = BB_END (info->x_block);
  if (onlyjump_p (xi)
      || (returnjump_p (xi) && !side_effects_p (PATTERN (xi))))
    {
      info->cur.x_start = xi;
      xi = PREV_INSN (xi);
    }

  yi = BB_END (info->y_block);
  if (onlyjump_p (yi)
      || (returnjump_p (yi) && !side_effects_p (PATTERN (yi))))
    {
      info->cur.y_start = yi;
      /* Count everything except for unconditional jump as insn.  */
      /* ??? Is it right to count unconditional jumps with a clobber?
	 Should we count conditional returns?  */
      if (!simplejump_p (yi) && !returnjump_p (yi) && info->cur.x_start)
	info->cur.ninsns++;
      yi = PREV_INSN (yi);
    }

  if (mode & STRUCT_EQUIV_MATCH_JUMPS)
    {
      /* The caller is expected to have compared the jumps already, but we
	 need to match them again to get any local registers and inputs.  */
      gcc_assert (!info->cur.x_start == !info->cur.y_start);
      if (info->cur.x_start)
	{
	  if (any_condjump_p (info->cur.x_start)
	      ? !condjump_equiv_p (info, false)
	      : !insns_match_p (info->cur.x_start, info->cur.y_start, info))
	    gcc_unreachable ();
	}
      else if (any_condjump_p (xi) && any_condjump_p (yi))
	{
	  /* Conditional jumps that were not skipped above: record them as
	     the sequence start and match them now.  */
	  info->cur.x_start = xi;
	  info->cur.y_start = yi;
	  xi = PREV_INSN (xi);
	  yi = PREV_INSN (yi);
	  info->cur.ninsns++;
	  if (!condjump_equiv_p (info, false))
	    gcc_unreachable ();
	}
      if (info->cur.x_start && info->mode & STRUCT_EQUIV_FINAL)
	struct_equiv_merge (info->cur.x_start, info->cur.y_start, info);
    }

  struct_equiv_improve_checkpoint (&info->best_match, info);
  info->x_end = xi;
  info->y_end = yi;
  /* Scan both blocks backwards in lockstep, matching one insn pair at a
     time, until a mismatch or one of the stop points is reached.  */
  if (info->cur.x_start != x_stop)
    for (;;)
      {
	/* Ignore notes.  */
	while (!INSN_P (xi) && xi != x_stop)
	  xi = PREV_INSN (xi);

	while (!INSN_P (yi) && yi != y_stop)
	  yi = PREV_INSN (yi);

	if (!insns_match_p (xi, yi, info))
	  break;
	if (INSN_P (xi))
	  {
	    if (info->mode & STRUCT_EQUIV_FINAL)
	      struct_equiv_merge (xi, yi, info);
	    info->cur.ninsns++;
	    struct_equiv_improve_checkpoint (&info->best_match, info);
	  }
	if (xi == x_stop || yi == y_stop)
	  {
	    /* If we reached the start of at least one of the blocks, but
	       best_match hasn't been advanced back to the first valid insn
	       yet, represent the increased benefit of completing the block
	       as an increased instruction count.  */
	    if (info->best_match.x_start != info->cur.x_start
		&& (xi == BB_HEAD (info->x_block)
		    || yi == BB_HEAD (info->y_block)))
	      {
		/* Temporarily bump ninsns so the checkpoint records the
		   bonus, then undo the bump for the actual count.  */
		info->cur.ninsns++;
		struct_equiv_improve_checkpoint (&info->best_match, info);
		info->cur.ninsns--;
		if (info->best_match.ninsns > info->cur.ninsns)
		  info->best_match.ninsns = info->cur.ninsns;
	      }
	    break;
	  }
	xi = PREV_INSN (xi);
	yi = PREV_INSN (yi);
      }

  /* If we failed to match an insn, but had some changes registered from
     trying to make the insns match, we need to cancel these changes now.  */
  cancel_changes (0);
  /* Restore to best_match to get the sequence with the best known-so-far
     cost-benefit difference.  */
  struct_equiv_restore_checkpoint (&info->best_match, info);

  /* Include preceding notes and labels in the cross-jump / if-conversion.
     One, this may bring us to the head of the blocks.
     Two, it keeps line number notes as matched as may be.  */
  if (info->cur.ninsns)
    {
      xi = info->cur.x_start;
      yi = info->cur.y_start;
      while (xi != x_stop && !INSN_P (PREV_INSN (xi)))
	xi = PREV_INSN (xi);

      while (yi != y_stop && !INSN_P (PREV_INSN (yi)))
	yi = PREV_INSN (yi);

      info->cur.x_start = xi;
      info->cur.y_start = yi;
    }

  if (!info->cur.input_valid)
    info->x_input = info->y_input = info->input_reg = NULL_RTX;
  if (!info->need_rerun)
    {
      find_dying_inputs (info);
      if (info->mode & STRUCT_EQUIV_FINAL)
	{
	  if (info->check_input_conflict && ! resolve_input_conflict (info))
	    gcc_unreachable ();
	}
      else
	{
	  bool input_conflict = info->had_input_conflict;

	  /* Cheap conservative test for input conflicts: simulate the
	     ascending-order copies y_local[i] -> x_local[i] and see whether
	     any source register was clobbered by an earlier copy.  */
	  if (!input_conflict
	      && info->dying_inputs > 1
	      && bitmap_intersect_p (info->x_local_live, info->y_local_live))
	    {
	      regset_head clobbered_regs;

	      INIT_REG_SET (&clobbered_regs);
	      for (i = 0; i < info->cur.local_count; i++)
		{
		  if (assign_reg_reg_set (&clobbered_regs, info->y_local[i], 0))
		    {
		      input_conflict = true;
		      break;
		    }
		  assign_reg_reg_set (&clobbered_regs, info->x_local[i], 1);
		}
	      CLEAR_REG_SET (&clobbered_regs);
	    }
	  /* A newly found conflict requires another pass with conflict
	     checking enabled.  */
	  if (input_conflict && !info->check_input_conflict)
	    info->need_rerun = true;
	  info->check_input_conflict = input_conflict;
	}
    }

  if (info->mode & STRUCT_EQUIV_NEED_FULL_BLOCK
      && (info->cur.x_start != x_stop || info->cur.y_start != y_stop))
    return 0;
  return info->cur.ninsns;
}
1255
1256 /* For each local register, set info->local_rvalue to true iff the register
1257 is a dying input. Store the total number of these in info->dying_inputs. */
1258 static void
1259 find_dying_inputs (struct equiv_info *info)
1260 {
1261 int i;
1262
1263 info->dying_inputs = 0;
1264 for (i = info->cur.local_count-1; i >=0; i--)
1265 {
1266 rtx x = info->x_local[i];
1267 unsigned regno = REGNO (x);
1268 int nregs = (regno >= FIRST_PSEUDO_REGISTER
1269 ? 1 : hard_regno_nregs[regno][GET_MODE (x)]);
1270
1271 for (info->local_rvalue[i] = false; nregs > 0; regno++, --nregs)
1272 if (REGNO_REG_SET_P (info->x_local_live, regno))
1273 {
1274 info->dying_inputs++;
1275 info->local_rvalue[i] = true;
1276 break;
1277 }
1278 }
1279 }
1280
/* For each local register that is a dying input, y_local[i] will be
   copied to x_local[i].  We'll do this in ascending order.  Try to
   re-order the locals to avoid conflicts like r3 = r2; r4 = r3; .
   Return true iff the re-ordering is successful, or not necessary.  */
static bool
resolve_input_conflict (struct equiv_info *info)
{
  int i, j, end;
  int nswaps = 0;
  rtx save_x_local[STRUCT_EQUIV_MAX_LOCAL];
  rtx save_y_local[STRUCT_EQUIV_MAX_LOCAL];

  find_dying_inputs (info);
  /* With at most one dying input there can be no copy ordering conflict.  */
  if (info->dying_inputs <= 1)
    return true;
  /* Save the current ordering so all swaps can be undone on failure.  */
  memcpy (save_x_local, info->x_local, sizeof save_x_local);
  memcpy (save_y_local, info->y_local, sizeof save_y_local);
  end = info->cur.local_count - 1;
  for (i = 0; i <= end; i++)
    {
      /* Cycle detection with regsets is expensive, so we just check that
	 we don't exceed the maximum number of swaps needed in the acyclic
	 case.  */
      int max_swaps = end - i;

      /* Check if x_local[i] will be clobbered.  */
      if (!info->local_rvalue[i])
	continue;
      /* Check if any later value needs to be copied earlier.  */
      for (j = i + 1; j <= end; j++)
	{
	  rtx tmp;

	  if (!info->local_rvalue[j])
	    continue;
	  if (!reg_overlap_mentioned_p (info->x_local[i], info->y_local[j]))
	    continue;
	  if (--max_swaps < 0)
	    {
	      /* Swap budget exhausted - presumably a cyclic dependency;
		 restore the saved ordering and report failure.  */
	      memcpy (info->x_local, save_x_local, sizeof save_x_local);
	      memcpy (info->y_local, save_y_local, sizeof save_y_local);
	      return false;
	    }
	  nswaps++;
	  /* Move entry j before entry i by swapping them, in both the
	     x_local and y_local arrays to keep the pairs aligned.  */
	  tmp = info->x_local[i];
	  info->x_local[i] = info->x_local[j];
	  info->x_local[j] = tmp;
	  tmp = info->y_local[i];
	  info->y_local[i] = info->y_local[j];
	  info->y_local[j] = tmp;
	  /* Restart the inner scan at i + 1 (j++ follows): the value just
	     moved into slot i may itself conflict with later copies.  */
	  j = i;
	}
    }
  info->had_input_conflict = true;
  if (dump_file && nswaps)
    fprintf (dump_file, "Resolved input conflict, %d %s.\n",
	     nswaps, nswaps == 1 ? "swap" : "swaps");
  return true;
}