1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software
4 Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "toplev.h"
29 #include "rtl.h"
30 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "recog.h"
33 #include "target.h"
34 #include "output.h"
35 #include "tm_p.h"
36 #include "flags.h"
37 #include "real.h"
38 #include "regs.h"
39 #include "function.h"
40
41 /* Forward declarations */
42 static void set_of_1 (rtx, rtx, void *);
43 static bool covers_regno_p (rtx, unsigned int);
44 static bool covers_regno_no_parallel_p (rtx, unsigned int);
45 static int rtx_referenced_p_1 (rtx *, void *);
46 static int computed_jump_p_1 (rtx);
47 static void parms_set (rtx, rtx, void *);
48
49 static unsigned HOST_WIDE_INT cached_nonzero_bits (rtx, enum machine_mode,
50 rtx, enum machine_mode,
51 unsigned HOST_WIDE_INT);
52 static unsigned HOST_WIDE_INT nonzero_bits1 (rtx, enum machine_mode, rtx,
53 enum machine_mode,
54 unsigned HOST_WIDE_INT);
55 static unsigned int cached_num_sign_bit_copies (rtx, enum machine_mode, rtx,
56 enum machine_mode,
57 unsigned int);
58 static unsigned int num_sign_bit_copies1 (rtx, enum machine_mode, rtx,
59 enum machine_mode, unsigned int);
60
61 /* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
62 -1 if a code has no such operand. */
63 static int non_rtx_starting_operands[NUM_RTX_CODE];
64
65 /* Bit flags that specify the machine subtype we are compiling for.
66 Bits are tested using macros TARGET_... defined in the tm.h file
67 and set by `-m...' switches. Must be defined in rtlanal.c. */
68
69 int target_flags;
70
71 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
72 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
73 SIGN_EXTEND then while narrowing we also have to enforce the
74 representation and sign-extend the value to mode DESTINATION_REP.
75
76 If the value is already sign-extended to DESTINATION_REP mode we
77 can just switch to DESTINATION mode on it. For each pair of
78 integral modes SOURCE and DESTINATION, when truncating from SOURCE
79 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
80 contains the number of high-order bits in SOURCE that have to be
81 copies of the sign-bit so that we can do this mode-switch to
82 DESTINATION. */
83
84 static unsigned int
85 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
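
/* A worked instance (an illustrative assumption, not from this file):
   on a target whose TARGET_MODE_REP_EXTENDED (HImode, SImode) hook
   returns SIGN_EXTEND, an HImode value held in a wider register keeps
   bits 16..31 equal to bit 15, so truncating SImode to HImode requires
   GET_MODE_BITSIZE (SImode) - GET_MODE_BITSIZE (HImode) = 32 - 16 = 16
   high-order copies of the sign bit, and
   num_sign_bit_copies_in_rep[SImode][HImode] would hold 16.  */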
86 \f
87 /* Return 1 if the value of X is unstable
88 (would be different at a different point in the program).
89 The frame pointer, arg pointer, etc. are considered stable
90 (within one function) and so is anything marked `unchanging'. */
91
92 int
93 rtx_unstable_p (rtx x)
94 {
95 RTX_CODE code = GET_CODE (x);
96 int i;
97 const char *fmt;
98
99 switch (code)
100 {
101 case MEM:
102 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
103
104 case CONST:
105 case CONST_INT:
106 case CONST_DOUBLE:
107 case CONST_VECTOR:
108 case SYMBOL_REF:
109 case LABEL_REF:
110 return 0;
111
112 case REG:
113 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
114 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
115 /* The arg pointer varies if it is not a fixed register. */
116 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
117 return 0;
118 #ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
119 /* ??? When call-clobbered, the value is stable modulo the restore
120 that must happen after a call. This currently misleads local-alloc
121 into believing that the restore is not needed. */
122 if (x == pic_offset_table_rtx)
123 return 0;
124 #endif
125 return 1;
126
127 case ASM_OPERANDS:
128 if (MEM_VOLATILE_P (x))
129 return 1;
130
131 /* Fall through. */
132
133 default:
134 break;
135 }
136
137 fmt = GET_RTX_FORMAT (code);
138 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
139 if (fmt[i] == 'e')
140 {
141 if (rtx_unstable_p (XEXP (x, i)))
142 return 1;
143 }
144 else if (fmt[i] == 'E')
145 {
146 int j;
147 for (j = 0; j < XVECLEN (x, i); j++)
148 if (rtx_unstable_p (XVECEXP (x, i, j)))
149 return 1;
150 }
151
152 return 0;
153 }
154
155 /* Return 1 if X has a value that can vary even between two
156 executions of the program. 0 means X can be compared reliably
157 against certain constants or near-constants.
158 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
159 zero, we are slightly more conservative.
160 The frame pointer and the arg pointer are considered constant. */
161
162 int
163 rtx_varies_p (rtx x, int for_alias)
164 {
165 RTX_CODE code;
166 int i;
167 const char *fmt;
168
169 if (!x)
170 return 0;
171
172 code = GET_CODE (x);
173 switch (code)
174 {
175 case MEM:
176 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
177
178 case CONST:
179 case CONST_INT:
180 case CONST_DOUBLE:
181 case CONST_VECTOR:
182 case SYMBOL_REF:
183 case LABEL_REF:
184 return 0;
185
186 case REG:
187 /* Note that we have to test for the actual rtx used for the frame
188 and arg pointers and not just the register number in case we have
189 eliminated the frame and/or arg pointer and are using it
190 for pseudos. */
191 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
192 /* The arg pointer varies if it is not a fixed register. */
193 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
194 return 0;
195 if (x == pic_offset_table_rtx
196 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
197 /* ??? When call-clobbered, the value is stable modulo the restore
198 that must happen after a call. This currently misleads
199 local-alloc into believing that the restore is not needed, so we
200 must return 0 only if we are called from alias analysis. */
201 && for_alias
202 #endif
203 )
204 return 0;
205 return 1;
206
207 case LO_SUM:
208 /* The operand 0 of a LO_SUM is considered constant
209 (in fact it is related specifically to operand 1)
210 during alias analysis. */
211 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
212 || rtx_varies_p (XEXP (x, 1), for_alias);
213
214 case ASM_OPERANDS:
215 if (MEM_VOLATILE_P (x))
216 return 1;
217
218 /* Fall through. */
219
220 default:
221 break;
222 }
223
224 fmt = GET_RTX_FORMAT (code);
225 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
226 if (fmt[i] == 'e')
227 {
228 if (rtx_varies_p (XEXP (x, i), for_alias))
229 return 1;
230 }
231 else if (fmt[i] == 'E')
232 {
233 int j;
234 for (j = 0; j < XVECLEN (x, i); j++)
235 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
236 return 1;
237 }
238
239 return 0;
240 }
241
242 /* Return nonzero if the use of X as an address in a MEM can cause a trap.
243 MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
244 whether nonzero is returned for unaligned memory accesses on strict
245 alignment machines. */
246
247 static int
248 rtx_addr_can_trap_p_1 (rtx x, enum machine_mode mode, bool unaligned_mems)
249 {
250 enum rtx_code code = GET_CODE (x);
251
252 switch (code)
253 {
254 case SYMBOL_REF:
255 return SYMBOL_REF_WEAK (x);
256
257 case LABEL_REF:
258 return 0;
259
260 case REG:
261 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
262 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
263 || x == stack_pointer_rtx
264 /* The arg pointer varies if it is not a fixed register. */
265 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
266 return 0;
267 /* All of the virtual frame registers are stack references. */
268 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
269 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
270 return 0;
271 return 1;
272
273 case CONST:
274 return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems);
275
276 case PLUS:
277 /* An address is assumed not to trap if:
278 - it is an address that can't trap plus a constant integer,
279 with the proper remainder modulo the mode size if we are
280 considering unaligned memory references. */
281 if (!rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems)
282 && GET_CODE (XEXP (x, 1)) == CONST_INT)
283 {
284 HOST_WIDE_INT offset;
285
286 if (!STRICT_ALIGNMENT
287 || !unaligned_mems
288 || GET_MODE_SIZE (mode) == 0)
289 return 0;
290
291 offset = INTVAL (XEXP (x, 1));
292
293 #ifdef SPARC_STACK_BOUNDARY_HACK
294 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
295 the real alignment of %sp. However, when it does this, the
296 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
297 if (SPARC_STACK_BOUNDARY_HACK
298 && (XEXP (x, 0) == stack_pointer_rtx
299 || XEXP (x, 0) == hard_frame_pointer_rtx))
300 offset -= STACK_POINTER_OFFSET;
301 #endif
302
303 return offset % GET_MODE_SIZE (mode) != 0;
304 }
305
306 /* - or it is the pic register plus a constant. */
307 if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
308 return 0;
309
310 return 1;
311
312 case LO_SUM:
313 case PRE_MODIFY:
314 return rtx_addr_can_trap_p_1 (XEXP (x, 1), mode, unaligned_mems);
315
316 case PRE_DEC:
317 case PRE_INC:
318 case POST_DEC:
319 case POST_INC:
320 case POST_MODIFY:
321 return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems);
322
323 default:
324 break;
325 }
326
327 /* If it isn't one of the cases above, it can cause a trap. */
328 return 1;
329 }
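
/* For instance, on a hypothetical STRICT_ALIGNMENT target (an
   illustrative assumption), an SImode access whose address is
   (plus (reg sp) (const_int 2)) reaches the PLUS case with a trap-free
   base when UNALIGNED_MEMS is set, and 2 % GET_MODE_SIZE (SImode) != 0
   makes the function return 1; the same access at offset 4 returns 0.  */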
330
331 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
332
333 int
334 rtx_addr_can_trap_p (rtx x)
335 {
336 return rtx_addr_can_trap_p_1 (x, VOIDmode, false);
337 }
338
339 /* Return true if X is an address that is known to not be zero. */
340
341 bool
342 nonzero_address_p (rtx x)
343 {
344 enum rtx_code code = GET_CODE (x);
345
346 switch (code)
347 {
348 case SYMBOL_REF:
349 return !SYMBOL_REF_WEAK (x);
350
351 case LABEL_REF:
352 return true;
353
354 case REG:
355 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
356 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
357 || x == stack_pointer_rtx
358 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
359 return true;
360 /* All of the virtual frame registers are stack references. */
361 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
362 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
363 return true;
364 return false;
365
366 case CONST:
367 return nonzero_address_p (XEXP (x, 0));
368
369 case PLUS:
370 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
371 {
372 /* Pointers aren't allowed to wrap. If we've got a register
373 that is known to be a pointer, and a positive offset, then
374 the composite can't be zero. */
375 if (INTVAL (XEXP (x, 1)) > 0
376 && REG_P (XEXP (x, 0))
377 && REG_POINTER (XEXP (x, 0)))
378 return true;
379
380 return nonzero_address_p (XEXP (x, 0));
381 }
382 /* Handle PIC references. */
383 else if (XEXP (x, 0) == pic_offset_table_rtx
384 && CONSTANT_P (XEXP (x, 1)))
385 return true;
386 return false;
387
388 case PRE_MODIFY:
389 /* Similar to the above; allow positive offsets. Further, since
390 auto-inc is only allowed in memories, the register must be a
391 pointer. */
392 if (GET_CODE (XEXP (x, 1)) == CONST_INT
393 && INTVAL (XEXP (x, 1)) > 0)
394 return true;
395 return nonzero_address_p (XEXP (x, 0));
396
397 case PRE_INC:
398 /* Similarly. Further, the offset is always positive. */
399 return true;
400
401 case PRE_DEC:
402 case POST_DEC:
403 case POST_INC:
404 case POST_MODIFY:
405 return nonzero_address_p (XEXP (x, 0));
406
407 case LO_SUM:
408 return nonzero_address_p (XEXP (x, 1));
409
410 default:
411 break;
412 }
413
414 /* If it isn't one of the cases above, it might be zero. */
415 return false;
416 }
417
418 /* Return 1 if X refers to a memory location whose address
419 cannot be compared reliably with constant addresses,
420 or if X refers to a BLKmode memory object.
421 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
422 zero, we are slightly more conservative. */
423
424 int
425 rtx_addr_varies_p (rtx x, int for_alias)
426 {
427 enum rtx_code code;
428 int i;
429 const char *fmt;
430
431 if (x == 0)
432 return 0;
433
434 code = GET_CODE (x);
435 if (code == MEM)
436 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
437
438 fmt = GET_RTX_FORMAT (code);
439 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
440 if (fmt[i] == 'e')
441 {
442 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
443 return 1;
444 }
445 else if (fmt[i] == 'E')
446 {
447 int j;
448 for (j = 0; j < XVECLEN (x, i); j++)
449 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
450 return 1;
451 }
452 return 0;
453 }
454 \f
455 /* Return the value of the integer term in X, if one is apparent;
456 otherwise return 0.
457 Only obvious integer terms are detected.
458 This is used in cse.c with the `related_value' field. */
459
460 HOST_WIDE_INT
461 get_integer_term (rtx x)
462 {
463 if (GET_CODE (x) == CONST)
464 x = XEXP (x, 0);
465
466 if (GET_CODE (x) == MINUS
467 && GET_CODE (XEXP (x, 1)) == CONST_INT)
468 return - INTVAL (XEXP (x, 1));
469 if (GET_CODE (x) == PLUS
470 && GET_CODE (XEXP (x, 1)) == CONST_INT)
471 return INTVAL (XEXP (x, 1));
472 return 0;
473 }
474
475 /* If X is a constant, return the value sans apparent integer term;
476 otherwise return 0.
477 Only obvious integer terms are detected. */
478
479 rtx
480 get_related_value (rtx x)
481 {
482 if (GET_CODE (x) != CONST)
483 return 0;
484 x = XEXP (x, 0);
485 if (GET_CODE (x) == PLUS
486 && GET_CODE (XEXP (x, 1)) == CONST_INT)
487 return XEXP (x, 0);
488 else if (GET_CODE (x) == MINUS
489 && GET_CODE (XEXP (x, 1)) == CONST_INT)
490 return XEXP (x, 0);
491 return 0;
492 }
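
/* For example, given (const (plus (symbol_ref ("x")) (const_int 8))),
   get_integer_term returns 8 and get_related_value returns the
   (symbol_ref ("x")) operand; cse.c pairs the two through the
   `related_value' field mentioned above.  */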
493 \f
494 /* Return the number of places FIND appears within X. If COUNT_DEST is
495 zero, we do not count occurrences inside the destination of a SET. */
496
497 int
498 count_occurrences (rtx x, rtx find, int count_dest)
499 {
500 int i, j;
501 enum rtx_code code;
502 const char *format_ptr;
503 int count;
504
505 if (x == find)
506 return 1;
507
508 code = GET_CODE (x);
509
510 switch (code)
511 {
512 case REG:
513 case CONST_INT:
514 case CONST_DOUBLE:
515 case CONST_VECTOR:
516 case SYMBOL_REF:
517 case CODE_LABEL:
518 case PC:
519 case CC0:
520 return 0;
521
522 case MEM:
523 if (MEM_P (find) && rtx_equal_p (x, find))
524 return 1;
525 break;
526
527 case SET:
528 if (SET_DEST (x) == find && ! count_dest)
529 return count_occurrences (SET_SRC (x), find, count_dest);
530 break;
531
532 default:
533 break;
534 }
535
536 format_ptr = GET_RTX_FORMAT (code);
537 count = 0;
538
539 for (i = 0; i < GET_RTX_LENGTH (code); i++)
540 {
541 switch (*format_ptr++)
542 {
543 case 'e':
544 count += count_occurrences (XEXP (x, i), find, count_dest);
545 break;
546
547 case 'E':
548 for (j = 0; j < XVECLEN (x, i); j++)
549 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
550 break;
551 }
552 }
553 return count;
554 }
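
/* Illustrative usage sketch (INSN and REG are assumptions): count the
   uses of a register within an insn pattern, skipping a plain store
   into it:

     int uses = count_occurrences (PATTERN (insn), reg, 0);

   Note the SET case above compares SET_DEST with FIND by pointer
   equality, so only a destination that is FIND itself is skipped.  */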
555 \f
556 /* Nonzero if register REG appears somewhere within IN.
557 Also works if REG is not a register; in this case it checks
558 for a subexpression of IN that is Lisp "equal" to REG. */
559
560 int
561 reg_mentioned_p (rtx reg, rtx in)
562 {
563 const char *fmt;
564 int i;
565 enum rtx_code code;
566
567 if (in == 0)
568 return 0;
569
570 if (reg == in)
571 return 1;
572
573 if (GET_CODE (in) == LABEL_REF)
574 return reg == XEXP (in, 0);
575
576 code = GET_CODE (in);
577
578 switch (code)
579 {
580 /* Compare registers by number. */
581 case REG:
582 return REG_P (reg) && REGNO (in) == REGNO (reg);
583
584 /* These codes have no constituent expressions
585 and are unique. */
586 case SCRATCH:
587 case CC0:
588 case PC:
589 return 0;
590
591 case CONST_INT:
592 case CONST_VECTOR:
593 case CONST_DOUBLE:
594 /* These are kept unique for a given value. */
595 return 0;
596
597 default:
598 break;
599 }
600
601 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
602 return 1;
603
604 fmt = GET_RTX_FORMAT (code);
605
606 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
607 {
608 if (fmt[i] == 'E')
609 {
610 int j;
611 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
612 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
613 return 1;
614 }
615 else if (fmt[i] == 'e'
616 && reg_mentioned_p (reg, XEXP (in, i)))
617 return 1;
618 }
619 return 0;
620 }
621 \f
622 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
623 no CODE_LABEL insn. */
624
625 int
626 no_labels_between_p (rtx beg, rtx end)
627 {
628 rtx p;
629 if (beg == end)
630 return 0;
631 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
632 if (LABEL_P (p))
633 return 0;
634 return 1;
635 }
636
637 /* Nonzero if register REG is used in an insn between
638 FROM_INSN and TO_INSN (exclusive of those two). */
639
640 int
641 reg_used_between_p (rtx reg, rtx from_insn, rtx to_insn)
642 {
643 rtx insn;
644
645 if (from_insn == to_insn)
646 return 0;
647
648 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
649 if (INSN_P (insn)
650 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
651 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
652 return 1;
653 return 0;
654 }
655 \f
656 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
657 is entirely replaced by a new value and the only use is as a SET_DEST,
658 we do not consider it a reference. */
659
660 int
661 reg_referenced_p (rtx x, rtx body)
662 {
663 int i;
664
665 switch (GET_CODE (body))
666 {
667 case SET:
668 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
669 return 1;
670
671 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
672 of a REG that occupies all of the REG, the insn references X if
673 it is mentioned in the destination. */
674 if (GET_CODE (SET_DEST (body)) != CC0
675 && GET_CODE (SET_DEST (body)) != PC
676 && !REG_P (SET_DEST (body))
677 && ! (GET_CODE (SET_DEST (body)) == SUBREG
678 && REG_P (SUBREG_REG (SET_DEST (body)))
679 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
680 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
681 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
682 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
683 && reg_overlap_mentioned_p (x, SET_DEST (body)))
684 return 1;
685 return 0;
686
687 case ASM_OPERANDS:
688 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
689 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
690 return 1;
691 return 0;
692
693 case CALL:
694 case USE:
695 case IF_THEN_ELSE:
696 return reg_overlap_mentioned_p (x, body);
697
698 case TRAP_IF:
699 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
700
701 case PREFETCH:
702 return reg_overlap_mentioned_p (x, XEXP (body, 0));
703
704 case UNSPEC:
705 case UNSPEC_VOLATILE:
706 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
707 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
708 return 1;
709 return 0;
710
711 case PARALLEL:
712 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
713 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
714 return 1;
715 return 0;
716
717 case CLOBBER:
718 if (MEM_P (XEXP (body, 0)))
719 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
720 return 1;
721 return 0;
722
723 case COND_EXEC:
724 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
725 return 1;
726 return reg_referenced_p (x, COND_EXEC_CODE (body));
727
728 default:
729 return 0;
730 }
731 }
732 \f
733 /* Nonzero if register REG is set or clobbered in an insn between
734 FROM_INSN and TO_INSN (exclusive of those two). */
735
736 int
737 reg_set_between_p (rtx reg, rtx from_insn, rtx to_insn)
738 {
739 rtx insn;
740
741 if (from_insn == to_insn)
742 return 0;
743
744 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
745 if (INSN_P (insn) && reg_set_p (reg, insn))
746 return 1;
747 return 0;
748 }
749
750 /* Nonzero if REG is set or clobbered by INSN (the per-insn test behind reg_set_between_p). */
751 int
752 reg_set_p (rtx reg, rtx insn)
753 {
754 /* We can be passed an insn or part of one. If we are passed an insn,
755 check if a side-effect of the insn clobbers REG. */
756 if (INSN_P (insn)
757 && (FIND_REG_INC_NOTE (insn, reg)
758 || (CALL_P (insn)
759 && ((REG_P (reg)
760 && REGNO (reg) < FIRST_PSEUDO_REGISTER
761 && TEST_HARD_REG_BIT (regs_invalidated_by_call,
762 REGNO (reg)))
763 || MEM_P (reg)
764 || find_reg_fusage (insn, CLOBBER, reg)))))
765 return 1;
766
767 return set_of (reg, insn) != NULL_RTX;
768 }
769
770 /* Similar to reg_set_between_p, but check all registers in X. Return 0
771 only if none of them are modified between START and END. Return 1 if
772 X contains a MEM; this routine does use memory aliasing. */
773
774 int
775 modified_between_p (rtx x, rtx start, rtx end)
776 {
777 enum rtx_code code = GET_CODE (x);
778 const char *fmt;
779 int i, j;
780 rtx insn;
781
782 if (start == end)
783 return 0;
784
785 switch (code)
786 {
787 case CONST_INT:
788 case CONST_DOUBLE:
789 case CONST_VECTOR:
790 case CONST:
791 case SYMBOL_REF:
792 case LABEL_REF:
793 return 0;
794
795 case PC:
796 case CC0:
797 return 1;
798
799 case MEM:
800 if (modified_between_p (XEXP (x, 0), start, end))
801 return 1;
802 if (MEM_READONLY_P (x))
803 return 0;
804 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
805 if (memory_modified_in_insn_p (x, insn))
806 return 1;
807 return 0;
808 break;
809
810 case REG:
811 return reg_set_between_p (x, start, end);
812
813 default:
814 break;
815 }
816
817 fmt = GET_RTX_FORMAT (code);
818 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
819 {
820 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
821 return 1;
822
823 else if (fmt[i] == 'E')
824 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
825 if (modified_between_p (XVECEXP (x, i, j), start, end))
826 return 1;
827 }
828
829 return 0;
830 }
831
832 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
833 of them are modified in INSN. Return 1 if X contains a MEM; this routine
834 does use memory aliasing. */
835
836 int
837 modified_in_p (rtx x, rtx insn)
838 {
839 enum rtx_code code = GET_CODE (x);
840 const char *fmt;
841 int i, j;
842
843 switch (code)
844 {
845 case CONST_INT:
846 case CONST_DOUBLE:
847 case CONST_VECTOR:
848 case CONST:
849 case SYMBOL_REF:
850 case LABEL_REF:
851 return 0;
852
853 case PC:
854 case CC0:
855 return 1;
856
857 case MEM:
858 if (modified_in_p (XEXP (x, 0), insn))
859 return 1;
860 if (MEM_READONLY_P (x))
861 return 0;
862 if (memory_modified_in_insn_p (x, insn))
863 return 1;
864 return 0;
865 break;
866
867 case REG:
868 return reg_set_p (x, insn);
869
870 default:
871 break;
872 }
873
874 fmt = GET_RTX_FORMAT (code);
875 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
876 {
877 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
878 return 1;
879
880 else if (fmt[i] == 'E')
881 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
882 if (modified_in_p (XVECEXP (x, i, j), insn))
883 return 1;
884 }
885
886 return 0;
887 }
888 \f
889 /* Helper function for set_of. */
890 struct set_of_data
891 {
892 rtx found;
893 rtx pat;
894 };
895
896 static void
897 set_of_1 (rtx x, rtx pat, void *data1)
898 {
899 struct set_of_data *data = (struct set_of_data *) (data1);
900 if (rtx_equal_p (x, data->pat)
901 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
902 data->found = pat;
903 }
904
905 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
906 (either directly or via STRICT_LOW_PART and similar modifiers). */
907 rtx
908 set_of (rtx pat, rtx insn)
909 {
910 struct set_of_data data;
911 data.found = NULL_RTX;
912 data.pat = pat;
913 note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
914 return data.found;
915 }
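
/* Illustrative usage sketch (REG and INSN are assumptions): unlike the
   boolean reg_set_p, the returned rtx also tells the caller how the
   register is modified:

     rtx set = set_of (reg, insn);
     if (set && GET_CODE (set) == SET)
       ... examine SET_SRC (set) ...
     else if (set)
       ... REG is clobbered rather than assigned ...  */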
916 \f
917 /* Given an INSN, return a SET expression if this insn has only a single SET.
918 It may also have CLOBBERs, USEs, or SETs whose outputs
919 will not be used, which we ignore. */
920
921 rtx
922 single_set_2 (rtx insn, rtx pat)
923 {
924 rtx set = NULL;
925 int set_verified = 1;
926 int i;
927
928 if (GET_CODE (pat) == PARALLEL)
929 {
930 for (i = 0; i < XVECLEN (pat, 0); i++)
931 {
932 rtx sub = XVECEXP (pat, 0, i);
933 switch (GET_CODE (sub))
934 {
935 case USE:
936 case CLOBBER:
937 break;
938
939 case SET:
940 /* We can consider insns having multiple sets, where all
941 but one are dead, as single set insns. In the common case
942 only one set is present in the pattern, so we want
943 to avoid checking for REG_UNUSED notes unless necessary.
944
945 When we reach a set the first time, we just expect it is
946 the single set we are looking for; only when more
947 sets are found in the insn do we check them. */
948 if (!set_verified)
949 {
950 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
951 && !side_effects_p (set))
952 set = NULL;
953 else
954 set_verified = 1;
955 }
956 if (!set)
957 set = sub, set_verified = 0;
958 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
959 || side_effects_p (sub))
960 return NULL_RTX;
961 break;
962
963 default:
964 return NULL_RTX;
965 }
966 }
967 }
968 return set;
969 }
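
/* Callers normally reach this through `single_set' (see rtl.h), which
   handles the common non-PARALLEL patterns inline before falling back
   here.  An illustrative sketch (INSN is an assumption):

     rtx set = single_set (insn);
     if (set)
       ... the insn's only live effect is to assign SET_SRC (set)
           to SET_DEST (set) ...  */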
970
971 /* Given an INSN, return nonzero if it has more than one SET, else return
972 zero. */
973
974 int
975 multiple_sets (rtx insn)
976 {
977 int found;
978 int i;
979
980 /* INSN must be an insn. */
981 if (! INSN_P (insn))
982 return 0;
983
984 /* Only a PARALLEL can have multiple SETs. */
985 if (GET_CODE (PATTERN (insn)) == PARALLEL)
986 {
987 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
988 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
989 {
990 /* If we have already found a SET, then return now. */
991 if (found)
992 return 1;
993 else
994 found = 1;
995 }
996 }
997
998 /* Either zero or one SET. */
999 return 0;
1000 }
1001 \f
1002 /* Return nonzero if the destination of SET equals the source
1003 and there are no side effects. */
1004
1005 int
1006 set_noop_p (rtx set)
1007 {
1008 rtx src = SET_SRC (set);
1009 rtx dst = SET_DEST (set);
1010
1011 if (dst == pc_rtx && src == pc_rtx)
1012 return 1;
1013
1014 if (MEM_P (dst) && MEM_P (src))
1015 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1016
1017 if (GET_CODE (dst) == ZERO_EXTRACT)
1018 return rtx_equal_p (XEXP (dst, 0), src)
1019 && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1020 && !side_effects_p (src);
1021
1022 if (GET_CODE (dst) == STRICT_LOW_PART)
1023 dst = XEXP (dst, 0);
1024
1025 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1026 {
1027 if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
1028 return 0;
1029 src = SUBREG_REG (src);
1030 dst = SUBREG_REG (dst);
1031 }
1032
1033 return (REG_P (src) && REG_P (dst)
1034 && REGNO (src) == REGNO (dst));
1035 }
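
/* For example, (set (reg:SI 5) (reg:SI 5)) is a no-op, as is a copy
   between SUBREGs of the same register at the same byte offset; a
   self-copy through a volatile MEM is not, since side_effects_p
   rejects it.  */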
1036 \f
1037 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1038 value to itself. */
1039
1040 int
1041 noop_move_p (rtx insn)
1042 {
1043 rtx pat = PATTERN (insn);
1044
1045 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1046 return 1;
1047
1048 /* Insns carrying these notes are useful later on. */
1049 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1050 return 0;
1051
1052 /* For now treat an insn with a REG_RETVAL note as a
1053 special insn which should not be considered a no-op. */
1054 if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
1055 return 0;
1056
1057 if (GET_CODE (pat) == SET && set_noop_p (pat))
1058 return 1;
1059
1060 if (GET_CODE (pat) == PARALLEL)
1061 {
1062 int i;
1063 /* If nothing but SETs of registers to themselves,
1064 this insn can also be deleted. */
1065 for (i = 0; i < XVECLEN (pat, 0); i++)
1066 {
1067 rtx tem = XVECEXP (pat, 0, i);
1068
1069 if (GET_CODE (tem) == USE
1070 || GET_CODE (tem) == CLOBBER)
1071 continue;
1072
1073 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1074 return 0;
1075 }
1076
1077 return 1;
1078 }
1079 return 0;
1080 }
1081 \f
1082
1083 /* Return the last thing that X was assigned from before *PINSN. If VALID_TO
1084 is not NULL_RTX then verify that the object is not modified up to VALID_TO.
1085 If the object was modified, if we hit a partial assignment to X, or hit a
1086 CODE_LABEL first, return X. If we found an assignment, update *PINSN to
1087 point to it. ALLOW_HWREG is set to 1 if hardware registers are allowed to
1088 be the src. */
1089
1090 rtx
1091 find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
1092 {
1093 rtx p;
1094
1095 for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
1096 p = PREV_INSN (p))
1097 if (INSN_P (p))
1098 {
1099 rtx set = single_set (p);
1100 rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);
1101
1102 if (set && rtx_equal_p (x, SET_DEST (set)))
1103 {
1104 rtx src = SET_SRC (set);
1105
1106 if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
1107 src = XEXP (note, 0);
1108
1109 if ((valid_to == NULL_RTX
1110 || ! modified_between_p (src, PREV_INSN (p), valid_to))
1111 /* Reject hard registers because we don't usually want
1112 to use them; we'd rather use a pseudo. */
1113 && (! (REG_P (src)
1114 && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
1115 {
1116 *pinsn = p;
1117 return src;
1118 }
1119 }
1120
1121 /* If set in non-simple way, we don't have a value. */
1122 if (reg_set_p (x, p))
1123 break;
1124 }
1125
1126 return x;
1127 }
1128 \f
1129 /* Return nonzero if register in range [REGNO, ENDREGNO)
1130 appears either explicitly or implicitly in X
1131 other than being stored into.
1132
1133 References contained within the substructure at LOC do not count.
1134 LOC may be zero, meaning don't ignore anything. */
1135
1136 int
1137 refers_to_regno_p (unsigned int regno, unsigned int endregno, rtx x,
1138 rtx *loc)
1139 {
1140 int i;
1141 unsigned int x_regno;
1142 RTX_CODE code;
1143 const char *fmt;
1144
1145 repeat:
1146 /* The contents of a REG_NONNEG note are always zero, so we must come here
1147 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1148 if (x == 0)
1149 return 0;
1150
1151 code = GET_CODE (x);
1152
1153 switch (code)
1154 {
1155 case REG:
1156 x_regno = REGNO (x);
1157
1158 /* If we are modifying the stack, frame, or argument pointer, it will
1159 clobber a virtual register. In fact, we could be more precise,
1160 but it isn't worth it. */
1161 if ((x_regno == STACK_POINTER_REGNUM
1162 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1163 || x_regno == ARG_POINTER_REGNUM
1164 #endif
1165 || x_regno == FRAME_POINTER_REGNUM)
1166 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1167 return 1;
1168
1169 return (endregno > x_regno
1170 && regno < x_regno + (x_regno < FIRST_PSEUDO_REGISTER
1171 ? hard_regno_nregs[x_regno][GET_MODE (x)]
1172 : 1));
1173
1174 case SUBREG:
1175 /* If this is a SUBREG of a hard reg, we can see exactly which
1176 registers are being modified. Otherwise, handle normally. */
1177 if (REG_P (SUBREG_REG (x))
1178 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1179 {
1180 unsigned int inner_regno = subreg_regno (x);
1181 unsigned int inner_endregno
1182 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1183 ? hard_regno_nregs[inner_regno][GET_MODE (x)] : 1);
1184
1185 return endregno > inner_regno && regno < inner_endregno;
1186 }
1187 break;
1188
1189 case CLOBBER:
1190 case SET:
1191 if (&SET_DEST (x) != loc
1192 /* Note setting a SUBREG counts as referring to the REG it is in for
1193 a pseudo but not for hard registers since we can
1194 treat each word individually. */
1195 && ((GET_CODE (SET_DEST (x)) == SUBREG
1196 && loc != &SUBREG_REG (SET_DEST (x))
1197 && REG_P (SUBREG_REG (SET_DEST (x)))
1198 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1199 && refers_to_regno_p (regno, endregno,
1200 SUBREG_REG (SET_DEST (x)), loc))
1201 || (!REG_P (SET_DEST (x))
1202 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1203 return 1;
1204
1205 if (code == CLOBBER || loc == &SET_SRC (x))
1206 return 0;
1207 x = SET_SRC (x);
1208 goto repeat;
1209
1210 default:
1211 break;
1212 }
1213
1214 /* X does not match, so try its subexpressions. */
1215
1216 fmt = GET_RTX_FORMAT (code);
1217 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1218 {
1219 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1220 {
1221 if (i == 0)
1222 {
1223 x = XEXP (x, 0);
1224 goto repeat;
1225 }
1226 else
1227 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1228 return 1;
1229 }
1230 else if (fmt[i] == 'E')
1231 {
1232 int j;
1233 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1234 if (loc != &XVECEXP (x, i, j)
1235 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1236 return 1;
1237 }
1238 }
1239 return 0;
1240 }
1241
1242 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1243 we check if any register number in X conflicts with the relevant register
1244 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1245 contains a MEM (we don't bother checking for memory addresses that can't
1246 conflict because we expect this to be a rare case). */
1247
1248 int
1249 reg_overlap_mentioned_p (rtx x, rtx in)
1250 {
1251 unsigned int regno, endregno;
1252
1253 /* If either argument is a constant, then modifying X can not
1254 affect IN. Here we look at IN; the CONSTANT_P (x) check is
1255 profitably combined with the switch statement below. */
1256 if (CONSTANT_P (in))
1257 return 0;
1258
1259 recurse:
1260 switch (GET_CODE (x))
1261 {
1262 case STRICT_LOW_PART:
1263 case ZERO_EXTRACT:
1264 case SIGN_EXTRACT:
1265 /* Overly conservative. */
1266 x = XEXP (x, 0);
1267 goto recurse;
1268
1269 case SUBREG:
1270 regno = REGNO (SUBREG_REG (x));
1271 if (regno < FIRST_PSEUDO_REGISTER)
1272 regno = subreg_regno (x);
1273 goto do_reg;
1274
1275 case REG:
1276 regno = REGNO (x);
1277 do_reg:
1278 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1279 ? hard_regno_nregs[regno][GET_MODE (x)] : 1);
1280 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1281
1282 case MEM:
1283 {
1284 const char *fmt;
1285 int i;
1286
1287 if (MEM_P (in))
1288 return 1;
1289
1290 fmt = GET_RTX_FORMAT (GET_CODE (in));
1291 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1292 if (fmt[i] == 'e')
1293 {
1294 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1295 return 1;
1296 }
1297 else if (fmt[i] == 'E')
1298 {
1299 int j;
1300 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1301 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1302 return 1;
1303 }
1304
1305 return 0;
1306 }
1307
1308 case SCRATCH:
1309 case PC:
1310 case CC0:
1311 return reg_mentioned_p (x, in);
1312
1313 case PARALLEL:
1314 {
1315 int i;
1316
1317 /* If any register in here overlaps IN, we return true. */
1318 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1319 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1320 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1321 return 1;
1322 return 0;
1323 }
1324
1325 default:
1326 gcc_assert (CONSTANT_P (x));
1327 return 0;
1328 }
1329 }
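
/* For example, on a target where DImode occupies two hard registers
   (an illustrative assumption), modifying (reg:DI 0) overlaps a use of
   (reg:SI 1): the REG case computes the span [0, 2) and
   refers_to_regno_p finds register 1 inside it.  */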
1330 \f
1331 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1332 (X would be the pattern of an insn).
1333 FUN receives two arguments:
1334 the REG, MEM, CC0 or PC being stored in or clobbered,
1335 the SET or CLOBBER rtx that does the store.
1336
1337 If the item being stored in or clobbered is a SUBREG of a hard register,
1338 the SUBREG will be passed. */
1339
1340 void
1341 note_stores (rtx x, void (*fun) (rtx, rtx, void *), void *data)
1342 {
1343 int i;
1344
1345 if (GET_CODE (x) == COND_EXEC)
1346 x = COND_EXEC_CODE (x);
1347
1348 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1349 {
1350 rtx dest = SET_DEST (x);
1351
1352 while ((GET_CODE (dest) == SUBREG
1353 && (!REG_P (SUBREG_REG (dest))
1354 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1355 || GET_CODE (dest) == ZERO_EXTRACT
1356 || GET_CODE (dest) == STRICT_LOW_PART)
1357 dest = XEXP (dest, 0);
1358
1359 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1360 each of whose first operand is a register. */
1361 if (GET_CODE (dest) == PARALLEL)
1362 {
1363 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1364 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1365 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1366 }
1367 else
1368 (*fun) (dest, x, data);
1369 }
1370
1371 else if (GET_CODE (x) == PARALLEL)
1372 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1373 note_stores (XVECEXP (x, 0, i), fun, data);
1374 }
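
/* Illustrative usage sketch (the helper is an assumption, not part of
   this file): count the hard registers stored into or clobbered by an
   insn pattern.

     static void
     count_hard_reg_stores (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
         (*(int *) data)++;
     }

     int count = 0;
     note_stores (PATTERN (insn), count_hard_reg_stores, &count);  */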
1375 \f
1376 /* Like note_stores, but call FUN for each expression that is being
1377 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1378 FUN for each expression, not any interior subexpressions. FUN receives a
1379 pointer to the expression and the DATA passed to this function.
1380
1381 Note that this is not quite the same test as that done in reg_referenced_p
1382 since that considers something as being referenced if it is being
1383 partially set, while we do not. */
1384
1385 void
1386 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1387 {
1388 rtx body = *pbody;
1389 int i;
1390
1391 switch (GET_CODE (body))
1392 {
1393 case COND_EXEC:
1394 (*fun) (&COND_EXEC_TEST (body), data);
1395 note_uses (&COND_EXEC_CODE (body), fun, data);
1396 return;
1397
1398 case PARALLEL:
1399 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1400 note_uses (&XVECEXP (body, 0, i), fun, data);
1401 return;
1402
1403 case USE:
1404 (*fun) (&XEXP (body, 0), data);
1405 return;
1406
1407 case ASM_OPERANDS:
1408 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1409 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1410 return;
1411
1412 case TRAP_IF:
1413 (*fun) (&TRAP_CONDITION (body), data);
1414 return;
1415
1416 case PREFETCH:
1417 (*fun) (&XEXP (body, 0), data);
1418 return;
1419
1420 case UNSPEC:
1421 case UNSPEC_VOLATILE:
1422 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1423 (*fun) (&XVECEXP (body, 0, i), data);
1424 return;
1425
1426 case CLOBBER:
1427 if (MEM_P (XEXP (body, 0)))
1428 (*fun) (&XEXP (XEXP (body, 0), 0), data);
1429 return;
1430
1431 case SET:
1432 {
1433 rtx dest = SET_DEST (body);
1434
1435 /* For sets we process everything in the source, plus registers in any
1436 memory expression in the store, and the operands of a ZERO_EXTRACT. */
1437 (*fun) (&SET_SRC (body), data);
1438
1439 if (GET_CODE (dest) == ZERO_EXTRACT)
1440 {
1441 (*fun) (&XEXP (dest, 1), data);
1442 (*fun) (&XEXP (dest, 2), data);
1443 }
1444
1445 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1446 dest = XEXP (dest, 0);
1447
1448 if (MEM_P (dest))
1449 (*fun) (&XEXP (dest, 0), data);
1450 }
1451 return;
1452
1453 default:
1454 /* All the other possibilities never store. */
1455 (*fun) (pbody, data);
1456 return;
1457 }
1458 }
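
/* Illustrative usage sketch (the helper is an assumption): test every
   used expression of an insn for volatile references.  Each callback
   receives a pointer to the expression, so a callback may also rewrite
   it in place.

     static void
     check_use (rtx *loc, void *data)
     {
       if (volatile_refs_p (*loc))
         *(bool *) data = true;
     }

     bool found = false;
     note_uses (&PATTERN (insn), check_use, &found);  */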
1459 \f
1460 /* Return nonzero if X's old contents don't survive after INSN.
1461 This will be true if X is (cc0) or if X is a register and
1462 X dies in INSN or because INSN entirely sets X.
1463
1464 "Entirely set" means set directly and not through a SUBREG, or
1465 ZERO_EXTRACT, so no trace of the old contents remains.
1466 Likewise, REG_INC does not count.
1467
1468 REG may be a hard or pseudo reg. Renumbering is not taken into account,
1469 but for this use that makes no difference, since regs don't overlap
1470 during their lifetimes. Therefore, this function may be used
1471 at any time after deaths have been computed (in flow.c).
1472
1473 If REG is a hard reg that occupies multiple machine registers, this
1474 function will only return 1 if each of those registers will be replaced
1475 by INSN. */
1476
1477 int
1478 dead_or_set_p (rtx insn, rtx x)
1479 {
1480 unsigned int regno, last_regno;
1481 unsigned int i;
1482
1483 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
1484 if (GET_CODE (x) == CC0)
1485 return 1;
1486
1487 gcc_assert (REG_P (x));
1488
1489 regno = REGNO (x);
1490 last_regno = (regno >= FIRST_PSEUDO_REGISTER ? regno
1491 : regno + hard_regno_nregs[regno][GET_MODE (x)] - 1);
1492
1493 for (i = regno; i <= last_regno; i++)
1494 if (! dead_or_set_regno_p (insn, i))
1495 return 0;
1496
1497 return 1;
1498 }
1499
1500 /* Return TRUE iff DEST is a register, or a subreg of a register that
1501 doesn't change the number of words of the inner register, and some
1502 part of the register is TEST_REGNO. */
1503
1504 static bool
1505 covers_regno_no_parallel_p (rtx dest, unsigned int test_regno)
1506 {
1507 unsigned int regno, endregno;
1508
1509 if (GET_CODE (dest) == SUBREG
1510 && (((GET_MODE_SIZE (GET_MODE (dest))
1511 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1512 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
1513 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
1514 dest = SUBREG_REG (dest);
1515
1516 if (!REG_P (dest))
1517 return false;
1518
1519 regno = REGNO (dest);
1520 endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1
1521 : regno + hard_regno_nregs[regno][GET_MODE (dest)]);
1522 return (test_regno >= regno && test_regno < endregno);
1523 }
1524
1525 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
1526 any member matches the covers_regno_no_parallel_p criteria. */
1527
1528 static bool
1529 covers_regno_p (rtx dest, unsigned int test_regno)
1530 {
1531 if (GET_CODE (dest) == PARALLEL)
1532 {
1533 /* Some targets place small structures in registers for return
1534 values of functions, and those registers are wrapped in
1535 PARALLELs that we may see as the destination of a SET. */
1536 int i;
1537
1538 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1539 {
1540 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
1541 if (inner != NULL_RTX
1542 && covers_regno_no_parallel_p (inner, test_regno))
1543 return true;
1544 }
1545
1546 return false;
1547 }
1548 else
1549 return covers_regno_no_parallel_p (dest, test_regno);
1550 }
1551
1552 /* Utility function for dead_or_set_p to check an individual register. Also
1553 called from flow.c. */
1554
1555 int
1556 dead_or_set_regno_p (rtx insn, unsigned int test_regno)
1557 {
1558 rtx pattern;
1559
1560 /* See if there is a death note for something that includes TEST_REGNO. */
1561 if (find_regno_note (insn, REG_DEAD, test_regno))
1562 return 1;
1563
1564 if (CALL_P (insn)
1565 && find_regno_fusage (insn, CLOBBER, test_regno))
1566 return 1;
1567
1568 pattern = PATTERN (insn);
1569
1570 if (GET_CODE (pattern) == COND_EXEC)
1571 pattern = COND_EXEC_CODE (pattern);
1572
1573 if (GET_CODE (pattern) == SET)
1574 return covers_regno_p (SET_DEST (pattern), test_regno);
1575 else if (GET_CODE (pattern) == PARALLEL)
1576 {
1577 int i;
1578
1579 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
1580 {
1581 rtx body = XVECEXP (pattern, 0, i);
1582
1583 if (GET_CODE (body) == COND_EXEC)
1584 body = COND_EXEC_CODE (body);
1585
1586 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
1587 && covers_regno_p (SET_DEST (body), test_regno))
1588 return 1;
1589 }
1590 }
1591
1592 return 0;
1593 }
1594
1595 /* Return the reg-note of kind KIND in insn INSN, if there is one.
1596 If DATUM is nonzero, look for one whose datum is DATUM. */
1597
1598 rtx
1599 find_reg_note (rtx insn, enum reg_note kind, rtx datum)
1600 {
1601 rtx link;
1602
1603 gcc_assert (insn);
1604
1605 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1606 if (! INSN_P (insn))
1607 return 0;
1608 if (datum == 0)
1609 {
1610 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1611 if (REG_NOTE_KIND (link) == kind)
1612 return link;
1613 return 0;
1614 }
1615
1616 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1617 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
1618 return link;
1619 return 0;
1620 }
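
/* Illustrative usage sketch (INSN is an assumption): fetch an insn's
   REG_EQUAL note, if any; XEXP (note, 0) is then the value the set's
   destination is known to equal.

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       ... use XEXP (note, 0) ...  */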
1621
1622 /* Return the reg-note of kind KIND in insn INSN which applies to register
1623 number REGNO, if any. Return 0 if there is no such reg-note. Note that
1624 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
1625 it might be the case that the note overlaps REGNO. */
1626
1627 rtx
1628 find_regno_note (rtx insn, enum reg_note kind, unsigned int regno)
1629 {
1630 rtx link;
1631
1632 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1633 if (! INSN_P (insn))
1634 return 0;
1635
1636 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1637 if (REG_NOTE_KIND (link) == kind
1638 /* Verify that it is a register, so that scratch and MEM won't cause a
1639 problem here. */
1640 && REG_P (XEXP (link, 0))
1641 && REGNO (XEXP (link, 0)) <= regno
1642 && ((REGNO (XEXP (link, 0))
1643 + (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
1644 : hard_regno_nregs[REGNO (XEXP (link, 0))]
1645 [GET_MODE (XEXP (link, 0))]))
1646 > regno))
1647 return link;
1648 return 0;
1649 }
1650
1651 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
1652 has such a note. */
1653
1654 rtx
1655 find_reg_equal_equiv_note (rtx insn)
1656 {
1657 rtx link;
1658
1659 if (!INSN_P (insn))
1660 return 0;
1661 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1662 if (REG_NOTE_KIND (link) == REG_EQUAL
1663 || REG_NOTE_KIND (link) == REG_EQUIV)
1664 {
1665 if (single_set (insn) == 0)
1666 return 0;
1667 return link;
1668 }
1669 return NULL;
1670 }
1671
1672 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
1673 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1674
1675 int
1676 find_reg_fusage (rtx insn, enum rtx_code code, rtx datum)
1677 {
1678 /* If it's not a CALL_INSN, it can't possibly have a
1679 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
1680 if (!CALL_P (insn))
1681 return 0;
1682
1683 gcc_assert (datum);
1684
1685 if (!REG_P (datum))
1686 {
1687 rtx link;
1688
1689 for (link = CALL_INSN_FUNCTION_USAGE (insn);
1690 link;
1691 link = XEXP (link, 1))
1692 if (GET_CODE (XEXP (link, 0)) == code
1693 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
1694 return 1;
1695 }
1696 else
1697 {
1698 unsigned int regno = REGNO (datum);
1699
1700 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1701 to pseudo registers, so don't bother checking. */
1702
1703 if (regno < FIRST_PSEUDO_REGISTER)
1704 {
1705 unsigned int end_regno
1706 = regno + hard_regno_nregs[regno][GET_MODE (datum)];
1707 unsigned int i;
1708
1709 for (i = regno; i < end_regno; i++)
1710 if (find_regno_fusage (insn, code, i))
1711 return 1;
1712 }
1713 }
1714
1715 return 0;
1716 }
1717
1718 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
1719 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1720
1721 int
1722 find_regno_fusage (rtx insn, enum rtx_code code, unsigned int regno)
1723 {
1724 rtx link;
1725
1726 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1727 to pseudo registers, so don't bother checking. */
1728
1729 if (regno >= FIRST_PSEUDO_REGISTER
1730 || !CALL_P (insn) )
1731 return 0;
1732
1733 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1734 {
1735 unsigned int regnote;
1736 rtx op, reg;
1737
1738 if (GET_CODE (op = XEXP (link, 0)) == code
1739 && REG_P (reg = XEXP (op, 0))
1740 && (regnote = REGNO (reg)) <= regno
1741 && regnote + hard_regno_nregs[regnote][GET_MODE (reg)] > regno)
1742 return 1;
1743 }
1744
1745 return 0;
1746 }
1747
1748 /* Return true if INSN is a call to a pure function. */
1749
1750 int
1751 pure_call_p (rtx insn)
1752 {
1753 rtx link;
1754
1755 if (!CALL_P (insn) || ! CONST_OR_PURE_CALL_P (insn))
1756 return 0;
1757
1758 /* Look for the note that differentiates const and pure functions. */
1759 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1760 {
1761 rtx u, m;
1762
1763 if (GET_CODE (u = XEXP (link, 0)) == USE
1764 && MEM_P (m = XEXP (u, 0)) && GET_MODE (m) == BLKmode
1765 && GET_CODE (XEXP (m, 0)) == SCRATCH)
1766 return 1;
1767 }
1768
1769 return 0;
1770 }
1771 \f
1772 /* Remove register note NOTE from the REG_NOTES of INSN. */
1773
1774 void
1775 remove_note (rtx insn, rtx note)
1776 {
1777 rtx link;
1778
1779 if (note == NULL_RTX)
1780 return;
1781
1782 if (REG_NOTES (insn) == note)
1783 {
1784 REG_NOTES (insn) = XEXP (note, 1);
1785 return;
1786 }
1787
1788 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1789 if (XEXP (link, 1) == note)
1790 {
1791 XEXP (link, 1) = XEXP (note, 1);
1792 return;
1793 }
1794
1795 gcc_unreachable ();
1796 }
1797
1798 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
1799 return 1 if it is found. A simple equality test is used to determine if
1800 NODE matches. */
1801
1802 int
1803 in_expr_list_p (rtx listp, rtx node)
1804 {
1805 rtx x;
1806
1807 for (x = listp; x; x = XEXP (x, 1))
1808 if (node == XEXP (x, 0))
1809 return 1;
1810
1811 return 0;
1812 }
1813
1814 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
1815 remove that entry from the list if it is found.
1816
1817 A simple equality test is used to determine if NODE matches. */
1818
1819 void
1820 remove_node_from_expr_list (rtx node, rtx *listp)
1821 {
1822 rtx temp = *listp;
1823 rtx prev = NULL_RTX;
1824
1825 while (temp)
1826 {
1827 if (node == XEXP (temp, 0))
1828 {
1829 /* Splice the node out of the list. */
1830 if (prev)
1831 XEXP (prev, 1) = XEXP (temp, 1);
1832 else
1833 *listp = XEXP (temp, 1);
1834
1835 return;
1836 }
1837
1838 prev = temp;
1839 temp = XEXP (temp, 1);
1840 }
1841 }
1842 \f
1843 /* Nonzero if X contains any volatile instructions. These are instructions
1844 which may cause unpredictable machine state, and thus no
1845 instructions should be moved or combined across them. This includes
1846 only volatile asms and UNSPEC_VOLATILE instructions. */
1847
1848 int
1849 volatile_insn_p (rtx x)
1850 {
1851 RTX_CODE code;
1852
1853 code = GET_CODE (x);
1854 switch (code)
1855 {
1856 case LABEL_REF:
1857 case SYMBOL_REF:
1858 case CONST_INT:
1859 case CONST:
1860 case CONST_DOUBLE:
1861 case CONST_VECTOR:
1862 case CC0:
1863 case PC:
1864 case REG:
1865 case SCRATCH:
1866 case CLOBBER:
1867 case ADDR_VEC:
1868 case ADDR_DIFF_VEC:
1869 case CALL:
1870 case MEM:
1871 return 0;
1872
1873 case UNSPEC_VOLATILE:
1874 /* case TRAP_IF: This isn't clear yet. */
1875 return 1;
1876
1877 case ASM_INPUT:
1878 case ASM_OPERANDS:
1879 if (MEM_VOLATILE_P (x))
1880 return 1;
1881
1882 default:
1883 break;
1884 }
1885
1886 /* Recursively scan the operands of this expression. */
1887
1888 {
1889 const char *fmt = GET_RTX_FORMAT (code);
1890 int i;
1891
1892 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1893 {
1894 if (fmt[i] == 'e')
1895 {
1896 if (volatile_insn_p (XEXP (x, i)))
1897 return 1;
1898 }
1899 else if (fmt[i] == 'E')
1900 {
1901 int j;
1902 for (j = 0; j < XVECLEN (x, i); j++)
1903 if (volatile_insn_p (XVECEXP (x, i, j)))
1904 return 1;
1905 }
1906 }
1907 }
1908 return 0;
1909 }
1910
1911 /* Nonzero if X contains any volatile memory references,
1912 UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions. */
1913
1914 int
1915 volatile_refs_p (rtx x)
1916 {
1917 RTX_CODE code;
1918
1919 code = GET_CODE (x);
1920 switch (code)
1921 {
1922 case LABEL_REF:
1923 case SYMBOL_REF:
1924 case CONST_INT:
1925 case CONST:
1926 case CONST_DOUBLE:
1927 case CONST_VECTOR:
1928 case CC0:
1929 case PC:
1930 case REG:
1931 case SCRATCH:
1932 case CLOBBER:
1933 case ADDR_VEC:
1934 case ADDR_DIFF_VEC:
1935 return 0;
1936
1937 case UNSPEC_VOLATILE:
1938 return 1;
1939
1940 case MEM:
1941 case ASM_INPUT:
1942 case ASM_OPERANDS:
1943 if (MEM_VOLATILE_P (x))
1944 return 1;
1945
1946 default:
1947 break;
1948 }
1949
1950 /* Recursively scan the operands of this expression. */
1951
1952 {
1953 const char *fmt = GET_RTX_FORMAT (code);
1954 int i;
1955
1956 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1957 {
1958 if (fmt[i] == 'e')
1959 {
1960 if (volatile_refs_p (XEXP (x, i)))
1961 return 1;
1962 }
1963 else if (fmt[i] == 'E')
1964 {
1965 int j;
1966 for (j = 0; j < XVECLEN (x, i); j++)
1967 if (volatile_refs_p (XVECEXP (x, i, j)))
1968 return 1;
1969 }
1970 }
1971 }
1972 return 0;
1973 }
1974
1975 /* Similar to above, except that it also rejects register pre- and post-
1976 incrementing. */
1977
1978 int
1979 side_effects_p (rtx x)
1980 {
1981 RTX_CODE code;
1982
1983 code = GET_CODE (x);
1984 switch (code)
1985 {
1986 case LABEL_REF:
1987 case SYMBOL_REF:
1988 case CONST_INT:
1989 case CONST:
1990 case CONST_DOUBLE:
1991 case CONST_VECTOR:
1992 case CC0:
1993 case PC:
1994 case REG:
1995 case SCRATCH:
1996 case ADDR_VEC:
1997 case ADDR_DIFF_VEC:
1998 return 0;
1999
2000 case CLOBBER:
2001 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2002 when some combination can't be done. If we see one, don't think
2003 that we can simplify the expression. */
2004 return (GET_MODE (x) != VOIDmode);
2005
2006 case PRE_INC:
2007 case PRE_DEC:
2008 case POST_INC:
2009 case POST_DEC:
2010 case PRE_MODIFY:
2011 case POST_MODIFY:
2012 case CALL:
2013 case UNSPEC_VOLATILE:
2014 /* case TRAP_IF: This isn't clear yet. */
2015 return 1;
2016
2017 case MEM:
2018 case ASM_INPUT:
2019 case ASM_OPERANDS:
2020 if (MEM_VOLATILE_P (x))
2021 return 1;
2022
2023 default:
2024 break;
2025 }
2026
2027 /* Recursively scan the operands of this expression. */
2028
2029 {
2030 const char *fmt = GET_RTX_FORMAT (code);
2031 int i;
2032
2033 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2034 {
2035 if (fmt[i] == 'e')
2036 {
2037 if (side_effects_p (XEXP (x, i)))
2038 return 1;
2039 }
2040 else if (fmt[i] == 'E')
2041 {
2042 int j;
2043 for (j = 0; j < XVECLEN (x, i); j++)
2044 if (side_effects_p (XVECEXP (x, i, j)))
2045 return 1;
2046 }
2047 }
2048 }
2049 return 0;
2050 }
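
/* To contrast the three predicates above (illustrative examples): for
   a volatile memory reference (mem/v:SI (reg:SI 1)), volatile_insn_p
   returns 0 (plain MEMs are accepted there), while volatile_refs_p and
   side_effects_p both return 1; for an auto-increment address such as
   (post_inc:SI (reg:SI 1)), only side_effects_p returns 1.  */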
2051 \f
2052 enum may_trap_p_flags
2053 {
2054 MTP_UNALIGNED_MEMS = 1,
2055 MTP_AFTER_MOVE = 2
2056 };
2057 /* Return nonzero if evaluating rtx X might cause a trap.
2058 (FLAGS & MTP_UNALIGNED_MEMS) controls whether nonzero is returned for
2059 unaligned memory accesses on strict alignment machines. If
2060 (FLAGS & MTP_AFTER_MOVE) is true, return nonzero even if the expression
2061 cannot trap at its current location but might become trapping if moved
2062 elsewhere. */
2063
2064 static int
2065 may_trap_p_1 (rtx x, unsigned flags)
2066 {
2067 int i;
2068 enum rtx_code code;
2069 const char *fmt;
2070 bool unaligned_mems = (flags & MTP_UNALIGNED_MEMS) != 0;
2071
2072 if (x == 0)
2073 return 0;
2074 code = GET_CODE (x);
2075 switch (code)
2076 {
2077 /* Handle these cases quickly. */
2078 case CONST_INT:
2079 case CONST_DOUBLE:
2080 case CONST_VECTOR:
2081 case SYMBOL_REF:
2082 case LABEL_REF:
2083 case CONST:
2084 case PC:
2085 case CC0:
2086 case REG:
2087 case SCRATCH:
2088 return 0;
2089
2090 case ASM_INPUT:
2091 case UNSPEC_VOLATILE:
2092 case TRAP_IF:
2093 return 1;
2094
2095 case ASM_OPERANDS:
2096 return MEM_VOLATILE_P (x);
2097
2098 /* Memory ref can trap unless it's a static var or a stack slot. */
2099 case MEM:
2100 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2101 reference; moving it out of a condition might cause its address
2102 to become invalid. */
2103 !(flags & MTP_AFTER_MOVE)
2104 && MEM_NOTRAP_P (x)
2105 && (!STRICT_ALIGNMENT || !unaligned_mems))
2106 return 0;
2107 return
2108 rtx_addr_can_trap_p_1 (XEXP (x, 0), GET_MODE (x), unaligned_mems);
2109
2110 /* Division by a non-constant might trap. */
2111 case DIV:
2112 case MOD:
2113 case UDIV:
2114 case UMOD:
2115 if (HONOR_SNANS (GET_MODE (x)))
2116 return 1;
2117 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2118 return flag_trapping_math;
2119 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2120 return 1;
2121 break;
2122
2123 case EXPR_LIST:
2124 /* An EXPR_LIST is used to represent a function call. This
2125 certainly may trap. */
2126 return 1;
2127
2128 case GE:
2129 case GT:
2130 case LE:
2131 case LT:
2132 case LTGT:
2133 case COMPARE:
2134 /* Some floating point comparisons may trap. */
2135 if (!flag_trapping_math)
2136 break;
2137 /* ??? There is no machine independent way to check for tests that trap
2138 when COMPARE is used, though many targets do make this distinction.
2139 For instance, sparc uses CCFPE for compares which generate exceptions
2140 and CCFP for compares which do not generate exceptions. */
2141 if (HONOR_NANS (GET_MODE (x)))
2142 return 1;
2143 /* But often the compare has some CC mode, so check operand
2144 modes as well. */
2145 if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
2146 || HONOR_NANS (GET_MODE (XEXP (x, 1))))
2147 return 1;
2148 break;
2149
2150 case EQ:
2151 case NE:
2152 if (HONOR_SNANS (GET_MODE (x)))
2153 return 1;
2154       /* Often the comparison is in CC mode, so check operand modes.  */
2155 if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
2156 || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
2157 return 1;
2158 break;
2159
2160 case FIX:
2161 /* Conversion of floating point might trap. */
2162 if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
2163 return 1;
2164 break;
2165
2166 case NEG:
2167 case ABS:
2168 case SUBREG:
2169 /* These operations don't trap even with floating point. */
2170 break;
2171
2172 default:
2173 /* Any floating arithmetic may trap. */
2174 if (SCALAR_FLOAT_MODE_P (GET_MODE (x))
2175 && flag_trapping_math)
2176 return 1;
2177 }
2178
2179 fmt = GET_RTX_FORMAT (code);
2180 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2181 {
2182 if (fmt[i] == 'e')
2183 {
2184 if (may_trap_p_1 (XEXP (x, i), flags))
2185 return 1;
2186 }
2187 else if (fmt[i] == 'E')
2188 {
2189 int j;
2190 for (j = 0; j < XVECLEN (x, i); j++)
2191 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2192 return 1;
2193 }
2194 }
2195 return 0;
2196 }
2197
2198 /* Return nonzero if evaluating rtx X might cause a trap. */
2199
2200 int
2201 may_trap_p (rtx x)
2202 {
2203 return may_trap_p_1 (x, 0);
2204 }
2205
2206 /* Return nonzero if evaluating rtx X might cause a trap, when the expression
2207 is moved from its current location by some optimization. */
2208
2209 int
2210 may_trap_after_code_motion_p (rtx x)
2211 {
2212 return may_trap_p_1 (x, MTP_AFTER_MOVE);
2213 }
2214
2215 /* Same as above, but additionally return nonzero if evaluating rtx X might
2216    cause a fault.  We define a fault, for the purpose of this function, as an
2217    erroneous execution condition that cannot be encountered during the normal
2218 execution of a valid program; the typical example is an unaligned memory
2219 access on a strict alignment machine. The compiler guarantees that it
2220 doesn't generate code that will fault from a valid program, but this
2221 guarantee doesn't mean anything for individual instructions. Consider
2222 the following example:
2223
2224 struct S { int d; union { char *cp; int *ip; }; };
2225
2226 int foo(struct S *s)
2227 {
2228 if (s->d == 1)
2229 return *s->ip;
2230 else
2231 return *s->cp;
2232 }
2233
2234 on a strict alignment machine. In a valid program, foo will never be
2235 invoked on a structure for which d is equal to 1 and the underlying
2236 unique field of the union not aligned on a 4-byte boundary, but the
2237 expression *s->ip might cause a fault if considered individually.
2238
2239 At the RTL level, potentially problematic expressions will almost always
2240 verify may_trap_p; for example, the above dereference can be emitted as
2241 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2242 However, suppose that foo is inlined in a caller that causes s->cp to
2243 point to a local character variable and guarantees that s->d is not set
2244 to 1; foo may have been effectively translated into pseudo-RTL as:
2245
2246 if ((reg:SI) == 1)
2247 (set (reg:SI) (mem:SI (%fp - 7)))
2248 else
2249 (set (reg:QI) (mem:QI (%fp - 7)))
2250
2251 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2252 memory reference to a stack slot, but it will certainly cause a fault
2253 on a strict alignment machine. */
2254
2255 int
2256 may_trap_or_fault_p (rtx x)
2257 {
2258 return may_trap_p_1 (x, MTP_UNALIGNED_MEMS);
2259 }
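
/* Illustrative sketch (not part of GCC): the three entry points above
   answer increasingly conservative questions.  The hypothetical helper
   below shows how a code motion pass might pick among them.  */
#if 0
static bool
safe_to_hoist_p (rtx x, bool out_of_condition)
{
  /* Moving an expression out of a guarding condition can turn a
     non-trapping MEM into a trapping one, so use the stricter test.  */
  return out_of_condition
	 ? !may_trap_after_code_motion_p (x)
	 : !may_trap_p (x);
}
#endif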
2260 \f
2261 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2262 i.e., an inequality. */
2263
2264 int
2265 inequality_comparisons_p (rtx x)
2266 {
2267 const char *fmt;
2268 int len, i;
2269 enum rtx_code code = GET_CODE (x);
2270
2271 switch (code)
2272 {
2273 case REG:
2274 case SCRATCH:
2275 case PC:
2276 case CC0:
2277 case CONST_INT:
2278 case CONST_DOUBLE:
2279 case CONST_VECTOR:
2280 case CONST:
2281 case LABEL_REF:
2282 case SYMBOL_REF:
2283 return 0;
2284
2285 case LT:
2286 case LTU:
2287 case GT:
2288 case GTU:
2289 case LE:
2290 case LEU:
2291 case GE:
2292 case GEU:
2293 return 1;
2294
2295 default:
2296 break;
2297 }
2298
2299 len = GET_RTX_LENGTH (code);
2300 fmt = GET_RTX_FORMAT (code);
2301
2302 for (i = 0; i < len; i++)
2303 {
2304 if (fmt[i] == 'e')
2305 {
2306 if (inequality_comparisons_p (XEXP (x, i)))
2307 return 1;
2308 }
2309 else if (fmt[i] == 'E')
2310 {
2311 int j;
2312 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2313 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2314 return 1;
2315 }
2316 }
2317
2318 return 0;
2319 }
2320 \f
2321 /* Replace any occurrence of FROM in X with TO.  The function does
2322    not descend into CONST_DOUBLEs when replacing.
2323
2324 Note that copying is not done so X must not be shared unless all copies
2325 are to be modified. */
2326
2327 rtx
2328 replace_rtx (rtx x, rtx from, rtx to)
2329 {
2330 int i, j;
2331 const char *fmt;
2332
2333   /* The following prevents infinite loops when we replace a MEM inside
2334      a CONST_DOUBLE with the same CONST_DOUBLE.  */
2335 if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
2336 return x;
2337
2338 if (x == from)
2339 return to;
2340
2341 /* Allow this function to make replacements in EXPR_LISTs. */
2342 if (x == 0)
2343 return 0;
2344
2345 if (GET_CODE (x) == SUBREG)
2346 {
2347 rtx new = replace_rtx (SUBREG_REG (x), from, to);
2348
2349 if (GET_CODE (new) == CONST_INT)
2350 {
2351 x = simplify_subreg (GET_MODE (x), new,
2352 GET_MODE (SUBREG_REG (x)),
2353 SUBREG_BYTE (x));
2354 gcc_assert (x);
2355 }
2356 else
2357 SUBREG_REG (x) = new;
2358
2359 return x;
2360 }
2361 else if (GET_CODE (x) == ZERO_EXTEND)
2362 {
2363 rtx new = replace_rtx (XEXP (x, 0), from, to);
2364
2365 if (GET_CODE (new) == CONST_INT)
2366 {
2367 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2368 new, GET_MODE (XEXP (x, 0)));
2369 gcc_assert (x);
2370 }
2371 else
2372 XEXP (x, 0) = new;
2373
2374 return x;
2375 }
2376
2377 fmt = GET_RTX_FORMAT (GET_CODE (x));
2378 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2379 {
2380 if (fmt[i] == 'e')
2381 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2382 else if (fmt[i] == 'E')
2383 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2384 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2385 }
2386
2387 return x;
2388 }
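
/* Illustrative sketch (not part of GCC): because replace_rtx modifies X
   in place, callers typically pass a fresh copy.  The SUBREG and
   ZERO_EXTEND cases above also fold the result when a register is
   replaced by a constant, as this hypothetical fragment relies on.  */
#if 0
  /* Substitute a known constant for REG in a copied pattern;
     (zero_extend:SI (subreg:QI (reg) 0)) folds to a CONST_INT.  */
  rtx pat = replace_rtx (copy_rtx (PATTERN (insn)), reg, GEN_INT (42));
#endif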
2389 \f
2390 /* Replace occurrences of the old label in *X with the new one.
2391 DATA is a REPLACE_LABEL_DATA containing the old and new labels. */
2392
2393 int
2394 replace_label (rtx *x, void *data)
2395 {
2396 rtx l = *x;
2397 rtx old_label = ((replace_label_data *) data)->r1;
2398 rtx new_label = ((replace_label_data *) data)->r2;
2399 bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;
2400
2401 if (l == NULL_RTX)
2402 return 0;
2403
2404 if (GET_CODE (l) == SYMBOL_REF
2405 && CONSTANT_POOL_ADDRESS_P (l))
2406 {
2407 rtx c = get_pool_constant (l);
2408 if (rtx_referenced_p (old_label, c))
2409 {
2410 rtx new_c, new_l;
2411 replace_label_data *d = (replace_label_data *) data;
2412
2413 	  /* Create a copy of constant C; replace the label inside
2414 	     but do not update LABEL_NUSES because uses in the constant
2415 	     pool are not counted.  */
2416 new_c = copy_rtx (c);
2417 d->update_label_nuses = false;
2418 for_each_rtx (&new_c, replace_label, data);
2419 d->update_label_nuses = update_label_nuses;
2420
2421 	  /* Add the new constant NEW_C to the constant pool and replace
2422 	     the old reference to the constant with the new reference.  */
2423 new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
2424 *x = replace_rtx (l, l, new_l);
2425 }
2426 return 0;
2427 }
2428
2429 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2430 field. This is not handled by for_each_rtx because it doesn't
2431 handle unprinted ('0') fields. */
2432 if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
2433 JUMP_LABEL (l) = new_label;
2434
2435 if ((GET_CODE (l) == LABEL_REF
2436 || GET_CODE (l) == INSN_LIST)
2437 && XEXP (l, 0) == old_label)
2438 {
2439 XEXP (l, 0) = new_label;
2440 if (update_label_nuses)
2441 {
2442 ++LABEL_NUSES (new_label);
2443 --LABEL_NUSES (old_label);
2444 }
2445 return 0;
2446 }
2447
2448 return 0;
2449 }
2450
2451 /* Return nonzero when *BODY is equal to X or when X is directly
2452    referenced by *BODY, so that FOR_EACH_RTX stops traversing and
2453    returns nonzero too; otherwise FOR_EACH_RTX keeps traversing *BODY.  */
2454
2455 static int
2456 rtx_referenced_p_1 (rtx *body, void *x)
2457 {
2458 rtx y = (rtx) x;
2459
2460 if (*body == NULL_RTX)
2461 return y == NULL_RTX;
2462
2463 /* Return true if a label_ref *BODY refers to label Y. */
2464 if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
2465 return XEXP (*body, 0) == y;
2466
2467   /* If *BODY is a reference to a pool constant, traverse the constant.  */
2468 if (GET_CODE (*body) == SYMBOL_REF
2469 && CONSTANT_POOL_ADDRESS_P (*body))
2470 return rtx_referenced_p (y, get_pool_constant (*body));
2471
2472 /* By default, compare the RTL expressions. */
2473 return rtx_equal_p (*body, y);
2474 }
2475
2476 /* Return true if X is referenced in BODY. */
2477
2478 int
2479 rtx_referenced_p (rtx x, rtx body)
2480 {
2481 return for_each_rtx (&body, rtx_referenced_p_1, x);
2482 }
2483
2484 /* If INSN is a tablejump, return true and store the label preceding the
2485    jump table in *LABELP and the jump table in *TABLEP; both may be NULL.  */
2486
2487 bool
2488 tablejump_p (rtx insn, rtx *labelp, rtx *tablep)
2489 {
2490 rtx label, table;
2491
2492 if (JUMP_P (insn)
2493 && (label = JUMP_LABEL (insn)) != NULL_RTX
2494 && (table = next_active_insn (label)) != NULL_RTX
2495 && JUMP_P (table)
2496 && (GET_CODE (PATTERN (table)) == ADDR_VEC
2497 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
2498 {
2499 if (labelp)
2500 *labelp = label;
2501 if (tablep)
2502 *tablep = table;
2503 return true;
2504 }
2505 return false;
2506 }
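
/* Illustrative sketch (not part of GCC): the table returned in *TABLEP
   holds its targets in an ADDR_VEC or ADDR_DIFF_VEC, so its entries can
   be walked as below (label_is_jump_target_p later in this file does
   essentially this).  */
#if 0
  rtx label, table;
  if (tablejump_p (insn, &label, &table))
    {
      rtvec vec = XVEC (PATTERN (table),
			GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC);
      int i;
      for (i = 0; i < GET_NUM_ELEM (vec); i++)
	mark_label_used (XEXP (RTVEC_ELT (vec, i), 0));  /* hypothetical */
    }
#endif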
2507
2508 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
2509 constant that is not in the constant pool and not in the condition
2510 of an IF_THEN_ELSE. */
2511
2512 static int
2513 computed_jump_p_1 (rtx x)
2514 {
2515 enum rtx_code code = GET_CODE (x);
2516 int i, j;
2517 const char *fmt;
2518
2519 switch (code)
2520 {
2521 case LABEL_REF:
2522 case PC:
2523 return 0;
2524
2525 case CONST:
2526 case CONST_INT:
2527 case CONST_DOUBLE:
2528 case CONST_VECTOR:
2529 case SYMBOL_REF:
2530 case REG:
2531 return 1;
2532
2533 case MEM:
2534 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2535 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2536
2537 case IF_THEN_ELSE:
2538 return (computed_jump_p_1 (XEXP (x, 1))
2539 || computed_jump_p_1 (XEXP (x, 2)));
2540
2541 default:
2542 break;
2543 }
2544
2545 fmt = GET_RTX_FORMAT (code);
2546 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2547 {
2548 if (fmt[i] == 'e'
2549 && computed_jump_p_1 (XEXP (x, i)))
2550 return 1;
2551
2552 else if (fmt[i] == 'E')
2553 for (j = 0; j < XVECLEN (x, i); j++)
2554 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2555 return 1;
2556 }
2557
2558 return 0;
2559 }
2560
2561 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2562
2563 Tablejumps and casesi insns are not considered indirect jumps;
2564 we can recognize them by a (use (label_ref)). */
2565
2566 int
2567 computed_jump_p (rtx insn)
2568 {
2569 int i;
2570 if (JUMP_P (insn))
2571 {
2572 rtx pat = PATTERN (insn);
2573
2574 if (find_reg_note (insn, REG_LABEL, NULL_RTX))
2575 return 0;
2576 else if (GET_CODE (pat) == PARALLEL)
2577 {
2578 int len = XVECLEN (pat, 0);
2579 int has_use_labelref = 0;
2580
2581 for (i = len - 1; i >= 0; i--)
2582 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
2583 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
2584 == LABEL_REF))
2585 has_use_labelref = 1;
2586
2587 if (! has_use_labelref)
2588 for (i = len - 1; i >= 0; i--)
2589 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
2590 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
2591 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
2592 return 1;
2593 }
2594 else if (GET_CODE (pat) == SET
2595 && SET_DEST (pat) == pc_rtx
2596 && computed_jump_p_1 (SET_SRC (pat)))
2597 return 1;
2598 }
2599 return 0;
2600 }
2601
2602 /* Optimized worker loop for for_each_rtx, trying to avoid useless
2603    recursive calls.  Processes the subexpressions of EXP and passes them to F.  */
2604 static int
2605 for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
2606 {
2607 int result, i, j;
2608 const char *format = GET_RTX_FORMAT (GET_CODE (exp));
2609 rtx *x;
2610
2611 for (; format[n] != '\0'; n++)
2612 {
2613 switch (format[n])
2614 {
2615 case 'e':
2616 /* Call F on X. */
2617 x = &XEXP (exp, n);
2618 result = (*f) (x, data);
2619 if (result == -1)
2620 /* Do not traverse sub-expressions. */
2621 continue;
2622 else if (result != 0)
2623 /* Stop the traversal. */
2624 return result;
2625
2626 if (*x == NULL_RTX)
2627 /* There are no sub-expressions. */
2628 continue;
2629
2630 i = non_rtx_starting_operands[GET_CODE (*x)];
2631 if (i >= 0)
2632 {
2633 result = for_each_rtx_1 (*x, i, f, data);
2634 if (result != 0)
2635 return result;
2636 }
2637 break;
2638
2639 case 'V':
2640 case 'E':
2641 if (XVEC (exp, n) == 0)
2642 continue;
2643 for (j = 0; j < XVECLEN (exp, n); ++j)
2644 {
2645 /* Call F on X. */
2646 x = &XVECEXP (exp, n, j);
2647 result = (*f) (x, data);
2648 if (result == -1)
2649 /* Do not traverse sub-expressions. */
2650 continue;
2651 else if (result != 0)
2652 /* Stop the traversal. */
2653 return result;
2654
2655 if (*x == NULL_RTX)
2656 /* There are no sub-expressions. */
2657 continue;
2658
2659 i = non_rtx_starting_operands[GET_CODE (*x)];
2660 if (i >= 0)
2661 {
2662 result = for_each_rtx_1 (*x, i, f, data);
2663 if (result != 0)
2664 return result;
2665 }
2666 }
2667 break;
2668
2669 default:
2670 /* Nothing to do. */
2671 break;
2672 }
2673 }
2674
2675 return 0;
2676 }
2677
2678 /* Traverse X via depth-first search, calling F for each
2679 sub-expression (including X itself). F is also passed the DATA.
2680 If F returns -1, do not traverse sub-expressions, but continue
2681 traversing the rest of the tree. If F ever returns any other
2682 nonzero value, stop the traversal, and return the value returned
2683 by F. Otherwise, return 0. This function does not traverse inside
2684 tree structure that contains RTX_EXPRs, or into sub-expressions
2685 whose format code is `0' since it is not known whether or not those
2686 codes are actually RTL.
2687
2688 This routine is very general, and could (should?) be used to
2689 implement many of the other routines in this file. */
2690
2691 int
2692 for_each_rtx (rtx *x, rtx_function f, void *data)
2693 {
2694 int result;
2695 int i;
2696
2697 /* Call F on X. */
2698 result = (*f) (x, data);
2699 if (result == -1)
2700 /* Do not traverse sub-expressions. */
2701 return 0;
2702 else if (result != 0)
2703 /* Stop the traversal. */
2704 return result;
2705
2706 if (*x == NULL_RTX)
2707 /* There are no sub-expressions. */
2708 return 0;
2709
2710 i = non_rtx_starting_operands[GET_CODE (*x)];
2711 if (i < 0)
2712 return 0;
2713
2714 return for_each_rtx_1 (*x, i, f, data);
2715 }
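
/* Illustrative sketch (not part of GCC): a minimal for_each_rtx callback
   that counts MEM sub-expressions, skipping the interior of each MEM by
   returning -1.  Both functions are hypothetical examples.  */
#if 0
static int
count_mems_1 (rtx *x, void *data)
{
  if (*x != NULL_RTX && MEM_P (*x))
    {
      ++*(int *) data;
      return -1;	/* Do not traverse inside the MEM.  */
    }
  return 0;		/* Keep traversing.  */
}

static int
count_mems (rtx x)
{
  int n = 0;
  for_each_rtx (&x, count_mems_1, &n);
  return n;
}
#endif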
2716
2717
2718 /* Searches X for any reference to REGNO, returning the rtx of the
2719 reference found if any. Otherwise, returns NULL_RTX. */
2720
2721 rtx
2722 regno_use_in (unsigned int regno, rtx x)
2723 {
2724 const char *fmt;
2725 int i, j;
2726 rtx tem;
2727
2728 if (REG_P (x) && REGNO (x) == regno)
2729 return x;
2730
2731 fmt = GET_RTX_FORMAT (GET_CODE (x));
2732 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2733 {
2734 if (fmt[i] == 'e')
2735 {
2736 if ((tem = regno_use_in (regno, XEXP (x, i))))
2737 return tem;
2738 }
2739 else if (fmt[i] == 'E')
2740 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2741 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
2742 return tem;
2743 }
2744
2745 return NULL_RTX;
2746 }
2747
2748 /* Return a value indicating whether OP, an operand of a commutative
2749 operation, is preferred as the first or second operand. The higher
2750 the value, the stronger the preference for being the first operand.
2751 We use negative values to indicate a preference for the first operand
2752 and positive values for the second operand. */
2753
2754 int
2755 commutative_operand_precedence (rtx op)
2756 {
2757 enum rtx_code code = GET_CODE (op);
2758
2759   /* Constants always become the second operand.  Prefer "nice" constants.  */
2760 if (code == CONST_INT)
2761 return -7;
2762 if (code == CONST_DOUBLE)
2763 return -6;
2764 op = avoid_constant_pool_reference (op);
2765 code = GET_CODE (op);
2766
2767 switch (GET_RTX_CLASS (code))
2768 {
2769 case RTX_CONST_OBJ:
2770 if (code == CONST_INT)
2771 return -5;
2772 if (code == CONST_DOUBLE)
2773 return -4;
2774 return -3;
2775
2776 case RTX_EXTRA:
2777 /* SUBREGs of objects should come second. */
2778 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
2779 return -2;
2780
2781 if (!CONSTANT_P (op))
2782 return 0;
2783 else
2784 /* As for RTX_CONST_OBJ. */
2785 return -3;
2786
2787 case RTX_OBJ:
2788       /* Complex expressions should come first, so decrease the priority
2789          of objects.  */
2790 return -1;
2791
2792 case RTX_COMM_ARITH:
2793 /* Prefer operands that are themselves commutative to be first.
2794 This helps to make things linear. In particular,
2795 (and (and (reg) (reg)) (not (reg))) is canonical. */
2796 return 4;
2797
2798 case RTX_BIN_ARITH:
2799 /* If only one operand is a binary expression, it will be the first
2800 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
2801 is canonical, although it will usually be further simplified. */
2802 return 2;
2803
2804 case RTX_UNARY:
2805 /* Then prefer NEG and NOT. */
2806 if (code == NEG || code == NOT)
2807 return 1;
2808
2809 default:
2810 return 0;
2811 }
2812 }
2813
2814 /* Return 1 iff it is necessary to swap the operands of a commutative
2815    operation in order to canonicalize the expression.  */
2816
2817 int
2818 swap_commutative_operands_p (rtx x, rtx y)
2819 {
2820 return (commutative_operand_precedence (x)
2821 < commutative_operand_precedence (y));
2822 }
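
/* Illustrative sketch (not part of GCC): code that builds commutative
   rtxes typically canonicalizes the operand order first, so that
   (plus (reg) (const_int)) is produced rather than the reverse.  */
#if 0
  if (swap_commutative_operands_p (op0, op1))
    {
      rtx tem = op0;
      op0 = op1;
      op1 = tem;
    }
  x = gen_rtx_PLUS (mode, op0, op1);
#endif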
2823
2824 /* Return 1 if X is an autoincrement side effect and the register is
2825 not the stack pointer. */
2826 int
2827 auto_inc_p (rtx x)
2828 {
2829 switch (GET_CODE (x))
2830 {
2831 case PRE_INC:
2832 case POST_INC:
2833 case PRE_DEC:
2834 case POST_DEC:
2835 case PRE_MODIFY:
2836 case POST_MODIFY:
2837 /* There are no REG_INC notes for SP. */
2838 if (XEXP (x, 0) != stack_pointer_rtx)
2839 return 1;
2840 default:
2841 break;
2842 }
2843 return 0;
2844 }
2845
2846 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
2847 int
2848 loc_mentioned_in_p (rtx *loc, rtx in)
2849 {
2850 enum rtx_code code = GET_CODE (in);
2851 const char *fmt = GET_RTX_FORMAT (code);
2852 int i, j;
2853
2854 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2855 {
2856 if (loc == &in->u.fld[i].rt_rtx)
2857 return 1;
2858 if (fmt[i] == 'e')
2859 {
2860 if (loc_mentioned_in_p (loc, XEXP (in, i)))
2861 return 1;
2862 }
2863 else if (fmt[i] == 'E')
2864 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
2865 if (loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
2866 return 1;
2867 }
2868 return 0;
2869 }
2870
2871 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
2872 and SUBREG_BYTE, return the bit offset where the subreg begins
2873 (counting from the least significant bit of the operand). */
2874
2875 unsigned int
2876 subreg_lsb_1 (enum machine_mode outer_mode,
2877 enum machine_mode inner_mode,
2878 unsigned int subreg_byte)
2879 {
2880 unsigned int bitpos;
2881 unsigned int byte;
2882 unsigned int word;
2883
2884 /* A paradoxical subreg begins at bit position 0. */
2885 if (GET_MODE_BITSIZE (outer_mode) > GET_MODE_BITSIZE (inner_mode))
2886 return 0;
2887
2888 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
2889 /* If the subreg crosses a word boundary ensure that
2890 it also begins and ends on a word boundary. */
2891 gcc_assert (!((subreg_byte % UNITS_PER_WORD
2892 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
2893 && (subreg_byte % UNITS_PER_WORD
2894 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
2895
2896 if (WORDS_BIG_ENDIAN)
2897 word = (GET_MODE_SIZE (inner_mode)
2898 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
2899 else
2900 word = subreg_byte / UNITS_PER_WORD;
2901 bitpos = word * BITS_PER_WORD;
2902
2903 if (BYTES_BIG_ENDIAN)
2904 byte = (GET_MODE_SIZE (inner_mode)
2905 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
2906 else
2907 byte = subreg_byte % UNITS_PER_WORD;
2908 bitpos += byte * BITS_PER_UNIT;
2909
2910 return bitpos;
2911 }
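
/* Worked example (assuming 32-bit words, UNITS_PER_WORD == 4): for
   (subreg:SI (reg:DI) 4), i.e. outer_mode SImode, inner_mode DImode,
   subreg_byte 4, a little-endian target computes word 1, byte 0 and
   returns bit offset 32, while a big-endian target computes
   (8 - (4 + 4)) / 4 == 0 for the word and likewise 0 for the byte,
   returning bit offset 0: byte 4 holds the most significant half on
   the former and the least significant half on the latter.  */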
2912
2913 /* Given a subreg X, return the bit offset where the subreg begins
2914 (counting from the least significant bit of the reg). */
2915
2916 unsigned int
2917 subreg_lsb (rtx x)
2918 {
2919 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
2920 SUBREG_BYTE (x));
2921 }
2922
2923 /* This function returns the regno offset of a subreg expression.
2924 xregno - A regno of an inner hard subreg_reg (or what will become one).
2925 xmode - The mode of xregno.
2926 offset - The byte offset.
2927 ymode - The mode of a top level SUBREG (or what may become one).
2928 RETURN - The regno offset which would be used. */
2929 unsigned int
2930 subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
2931 unsigned int offset, enum machine_mode ymode)
2932 {
2933 int nregs_xmode, nregs_ymode, nregs_xmode_unit_int;
2934 int mode_multiple, nregs_multiple;
2935 int y_offset;
2936 enum machine_mode xmode_unit, xmode_unit_int;
2937
2938 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
2939
2940 if (GET_MODE_INNER (xmode) == VOIDmode)
2941 xmode_unit = xmode;
2942 else
2943 xmode_unit = GET_MODE_INNER (xmode);
2944
2945 if (FLOAT_MODE_P (xmode_unit))
2946 {
2947 xmode_unit_int = int_mode_for_mode (xmode_unit);
2948 if (xmode_unit_int == BLKmode)
2949 /* It's probably bad to be here; a port should have an integer mode
2950 that's the same size as anything of which it takes a SUBREG. */
2951 xmode_unit_int = xmode_unit;
2952 }
2953 else
2954 xmode_unit_int = xmode_unit;
2955
2956 nregs_xmode_unit_int = hard_regno_nregs[xregno][xmode_unit_int];
2957
2958 /* Adjust nregs_xmode to allow for 'holes'. */
2959 if (nregs_xmode_unit_int != hard_regno_nregs[xregno][xmode_unit])
2960 nregs_xmode = nregs_xmode_unit_int * GET_MODE_NUNITS (xmode);
2961 else
2962 nregs_xmode = hard_regno_nregs[xregno][xmode];
2963
2964 nregs_ymode = hard_regno_nregs[xregno][ymode];
2965
2966 /* If this is a big endian paradoxical subreg, which uses more actual
2967 hard registers than the original register, we must return a negative
2968 offset so that we find the proper highpart of the register. */
2969 if (offset == 0
2970 && nregs_ymode > nregs_xmode
2971 && (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
2972 ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN))
2973 return nregs_xmode - nregs_ymode;
2974
2975 if (offset == 0 || nregs_xmode == nregs_ymode)
2976 return 0;
2977
2978 /* Size of ymode must not be greater than the size of xmode. */
2979 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
2980 gcc_assert (mode_multiple != 0);
2981
2982 y_offset = offset / GET_MODE_SIZE (ymode);
2983 nregs_multiple = nregs_xmode / nregs_ymode;
2984 return (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
2985 }
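
/* Worked example (hypothetical target, no holes): with 32-bit hard
   registers, xmode DImode (2 regs) and ymode SImode (1 reg), a byte
   offset of 4 gives y_offset 1, mode_multiple 2 and nregs_multiple 2,
   hence a regno offset of 1: the SUBREG names the second of the two
   hard registers backing the DImode value.  */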
2986
2987 /* This function returns true when the offset is representable via
2988 subreg_offset in the given regno.
2989 xregno - A regno of an inner hard subreg_reg (or what will become one).
2990 xmode - The mode of xregno.
2991 offset - The byte offset.
2992 ymode - The mode of a top level SUBREG (or what may become one).
2993 RETURN - Whether the offset is representable. */
2994 bool
2995 subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
2996 unsigned int offset, enum machine_mode ymode)
2997 {
2998 int nregs_xmode, nregs_ymode, nregs_xmode_unit, nregs_xmode_unit_int;
2999 int mode_multiple, nregs_multiple;
3000 int y_offset;
3001 enum machine_mode xmode_unit, xmode_unit_int;
3002
3003 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3004
3005 if (GET_MODE_INNER (xmode) == VOIDmode)
3006 xmode_unit = xmode;
3007 else
3008 xmode_unit = GET_MODE_INNER (xmode);
3009
3010 if (FLOAT_MODE_P (xmode_unit))
3011 {
3012 xmode_unit_int = int_mode_for_mode (xmode_unit);
3013 if (xmode_unit_int == BLKmode)
3014 /* It's probably bad to be here; a port should have an integer mode
3015 that's the same size as anything of which it takes a SUBREG. */
3016 xmode_unit_int = xmode_unit;
3017 }
3018 else
3019 xmode_unit_int = xmode_unit;
3020
3021 nregs_xmode_unit = hard_regno_nregs[xregno][xmode_unit];
3022 nregs_xmode_unit_int = hard_regno_nregs[xregno][xmode_unit_int];
3023
3024 /* If there are holes in a non-scalar mode in registers, we expect
3025 that it is made up of its units concatenated together. */
3026 if (nregs_xmode_unit != nregs_xmode_unit_int)
3027 {
3028 gcc_assert (nregs_xmode_unit * GET_MODE_NUNITS (xmode)
3029 == hard_regno_nregs[xregno][xmode]);
3030
3031 /* You can only ask for a SUBREG of a value with holes in the middle
3032 if you don't cross the holes. (Such a SUBREG should be done by
3033 picking a different register class, or doing it in memory if
3034 necessary.) An example of a value with holes is XCmode on 32-bit
3035 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3036 3 for each part, but in memory it's two 128-bit parts.
3037 Padding is assumed to be at the end (not necessarily the 'high part')
3038 of each unit. */
3039 if (nregs_xmode_unit != nregs_xmode_unit_int
3040 && (offset / GET_MODE_SIZE (xmode_unit_int) + 1
3041 < GET_MODE_NUNITS (xmode))
3042 && (offset / GET_MODE_SIZE (xmode_unit_int)
3043 != ((offset + GET_MODE_SIZE (ymode) - 1)
3044 / GET_MODE_SIZE (xmode_unit_int))))
3045 return false;
3046
3047 nregs_xmode = nregs_xmode_unit_int * GET_MODE_NUNITS (xmode);
3048 }
3049 else
3050 nregs_xmode = hard_regno_nregs[xregno][xmode];
3051
3052 nregs_ymode = hard_regno_nregs[xregno][ymode];
3053
3054 /* Paradoxical subregs are otherwise valid. */
3055 if (offset == 0
3056 && nregs_ymode > nregs_xmode
3057 && (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3058 ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN))
3059 return true;
3060
3061 /* Lowpart subregs are otherwise valid. */
3062 if (offset == subreg_lowpart_offset (ymode, xmode))
3063 return true;
3064
3065 /* This should always pass, otherwise we don't know how to verify
3066 the constraint. These conditions may be relaxed but
3067 subreg_regno_offset would need to be redesigned. */
3068 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3069 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3070
3071   /* The XMODE value can be seen as a vector of NREGS_XMODE
3072      values.  The subreg must represent a lowpart of a given field.
3073      Compute which field it is.  */
3074 offset -= subreg_lowpart_offset (ymode,
3075 mode_for_size (GET_MODE_BITSIZE (xmode)
3076 / nregs_xmode,
3077 MODE_INT, 0));
3078
3079 /* Size of ymode must not be greater than the size of xmode. */
3080 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3081 gcc_assert (mode_multiple != 0);
3082
3083 y_offset = offset / GET_MODE_SIZE (ymode);
3084 nregs_multiple = nregs_xmode / nregs_ymode;
3085
3086 gcc_assert ((offset % GET_MODE_SIZE (ymode)) == 0);
3087 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3088
3089 return (!(y_offset % (mode_multiple / nregs_multiple)));
3090 }
3091
3092 /* Return the final regno that a subreg expression refers to. */
3093 unsigned int
3094 subreg_regno (rtx x)
3095 {
3096 unsigned int ret;
3097 rtx subreg = SUBREG_REG (x);
3098 int regno = REGNO (subreg);
3099
3100 ret = regno + subreg_regno_offset (regno,
3101 GET_MODE (subreg),
3102 SUBREG_BYTE (x),
3103 GET_MODE (x));
3104 return ret;
3105 }
3106
3107 struct parms_set_data
3108 {
3109 int nregs;
3110 HARD_REG_SET regs;
3111 };
3112
3113 /* Helper function for noticing stores to parameter registers. */
3114 static void
3115 parms_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3116 {
3117 struct parms_set_data *d = data;
3118 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3119 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3120 {
3121 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3122 d->nregs--;
3123 }
3124 }
3125
3126 /* Look backward for the first parameter to be loaded.
3127 Note that loads of all parameters will not necessarily be
3128 found if CSE has eliminated some of them (e.g., an argument
3129 to the outer function is passed down as a parameter).
3130 Do not skip BOUNDARY. */
3131 rtx
3132 find_first_parameter_load (rtx call_insn, rtx boundary)
3133 {
3134 struct parms_set_data parm;
3135 rtx p, before, first_set;
3136
3137 /* Since different machines initialize their parameter registers
3138 in different orders, assume nothing. Collect the set of all
3139 parameter registers. */
3140 CLEAR_HARD_REG_SET (parm.regs);
3141 parm.nregs = 0;
3142 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3143 if (GET_CODE (XEXP (p, 0)) == USE
3144 && REG_P (XEXP (XEXP (p, 0), 0)))
3145 {
3146 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3147
3148 /* We only care about registers which can hold function
3149 arguments. */
3150 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3151 continue;
3152
3153 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3154 parm.nregs++;
3155 }
3156 before = call_insn;
3157 first_set = call_insn;
3158
3159 /* Search backward for the first set of a register in this set. */
3160 while (parm.nregs && before != boundary)
3161 {
3162 before = PREV_INSN (before);
3163
3164 /* It is possible that some loads got CSEed from one call to
3165 another. Stop in that case. */
3166 if (CALL_P (before))
3167 break;
3168
3169 	 /* Our caller must either ensure that we will find all sets
3170 	    (in case the code has not been optimized yet), or take care
3171 	    of possible labels by setting BOUNDARY to the preceding
3172 	    CODE_LABEL.  */
3173 if (LABEL_P (before))
3174 {
3175 gcc_assert (before == boundary);
3176 break;
3177 }
3178
3179 if (INSN_P (before))
3180 {
3181 int nregs_old = parm.nregs;
3182 note_stores (PATTERN (before), parms_set, &parm);
3183 /* If we found something that did not set a parameter reg,
3184 we're done. Do not keep going, as that might result
3185 in hoisting an insn before the setting of a pseudo
3186 that is used by the hoisted insn. */
3187 if (nregs_old != parm.nregs)
3188 first_set = before;
3189 else
3190 break;
3191 }
3192 }
3193 return first_set;
3194 }
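
/* Illustrative sketch (not part of GCC): a scheduling pass might use the
   insn returned above as a barrier, refusing to move anything across the
   parameter-setup sequence of a call.  BB_HEAD stands for whatever
   boundary the caller can guarantee; mark_immovable is hypothetical.  */
#if 0
  rtx first = find_first_parameter_load (call_insn, BB_HEAD (bb));
  for (insn = PREV_INSN (call_insn); insn != first; insn = PREV_INSN (insn))
    /* INSN belongs to the argument-setup sequence of CALL_INSN;
       do not separate it from the call.  */
    mark_immovable (insn);	/* hypothetical */
#endif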
3195
3196 /* Return true if we should avoid inserting code between INSN and the
3197    preceding call instruction.  */
3198
3199 bool
3200 keep_with_call_p (rtx insn)
3201 {
3202 rtx set;
3203
3204 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3205 {
3206 if (REG_P (SET_DEST (set))
3207 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3208 && fixed_regs[REGNO (SET_DEST (set))]
3209 && general_operand (SET_SRC (set), VOIDmode))
3210 return true;
3211 if (REG_P (SET_SRC (set))
3212 && FUNCTION_VALUE_REGNO_P (REGNO (SET_SRC (set)))
3213 && REG_P (SET_DEST (set))
3214 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3215 return true;
3216 /* There may be a stack pop just after the call and before the store
3217 of the return register. Search for the actual store when deciding
3218 if we can break or not. */
3219 if (SET_DEST (set) == stack_pointer_rtx)
3220 {
3221 rtx i2 = next_nonnote_insn (insn);
3222 if (i2 && keep_with_call_p (i2))
3223 return true;
3224 }
3225 }
3226 return false;
3227 }
3228
3229 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3230 to non-complex jumps. That is, direct unconditional, conditional,
3231 and tablejumps, but not computed jumps or returns. It also does
3232 not apply to the fallthru case of a conditional jump. */
3233
3234 bool
3235 label_is_jump_target_p (rtx label, rtx jump_insn)
3236 {
3237 rtx tmp = JUMP_LABEL (jump_insn);
3238
3239 if (label == tmp)
3240 return true;
3241
3242 if (tablejump_p (jump_insn, NULL, &tmp))
3243 {
3244 rtvec vec = XVEC (PATTERN (tmp),
3245 GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
3246 int i, veclen = GET_NUM_ELEM (vec);
3247
3248 for (i = 0; i < veclen; ++i)
3249 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3250 return true;
3251 }
3252
3253 return false;
3254 }
3255
3256 \f
3257 /* Return an estimate of the cost of computing rtx X.
3258 One use is in cse, to decide which expression to keep in the hash table.
3259 Another is in rtl generation, to pick the cheapest way to multiply.
3260 Other uses like the latter are expected in the future. */
3261
3262 int
3263 rtx_cost (rtx x, enum rtx_code outer_code ATTRIBUTE_UNUSED)
3264 {
3265 int i, j;
3266 enum rtx_code code;
3267 const char *fmt;
3268 int total;
3269
3270 if (x == 0)
3271 return 0;
3272
3273 /* Compute the default costs of certain things.
3274 Note that targetm.rtx_costs can override the defaults. */
3275
3276 code = GET_CODE (x);
3277 switch (code)
3278 {
3279 case MULT:
3280 total = COSTS_N_INSNS (5);
3281 break;
3282 case DIV:
3283 case UDIV:
3284 case MOD:
3285 case UMOD:
3286 total = COSTS_N_INSNS (7);
3287 break;
3288 case USE:
3289 /* Used in loop.c and combine.c as a marker. */
3290 total = 0;
3291 break;
3292 default:
3293 total = COSTS_N_INSNS (1);
3294 }
3295
3296 switch (code)
3297 {
3298 case REG:
3299 return 0;
3300
3301 case SUBREG:
3302 total = 0;
3303 /* If we can't tie these modes, make this expensive. The larger
3304 the mode, the more expensive it is. */
3305 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3306 return COSTS_N_INSNS (2
3307 + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
3308 break;
3309
3310 default:
3311 if (targetm.rtx_costs (x, code, outer_code, &total))
3312 return total;
3313 break;
3314 }
3315
3316   /* Sum the costs of the sub-rtxes, plus the cost of this operation,
3317      which is already in TOTAL.  */
3318
3319 fmt = GET_RTX_FORMAT (code);
3320 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3321 if (fmt[i] == 'e')
3322 total += rtx_cost (XEXP (x, i), code);
3323 else if (fmt[i] == 'E')
3324 for (j = 0; j < XVECLEN (x, i); j++)
3325 total += rtx_cost (XVECEXP (x, i, j), code);
3326
3327 return total;
3328 }
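
/* Illustrative sketch (not part of GCC): passes compare such costs to
   choose between equivalent expressions, e.g. preferring a shift-add
   sequence over a MULT when the target reports it as cheaper.  */
#if 0
  if (rtx_cost (candidate, SET) < rtx_cost (original, SET))
    validate_change (insn, &SET_SRC (pat), candidate, 0);
#endif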
3329 \f
3330 /* Return the cost of address expression X.
3331    Expect that X is a properly formed address reference.  */
3332
3333 int
3334 address_cost (rtx x, enum machine_mode mode)
3335 {
3336   /* We may be asked for the cost of various unusual addresses, such as the
3337      operands of push instructions.  It is not worthwhile to complicate the
3338      target hook to handle such cases.  */
3339
3340 if (!memory_address_p (mode, x))
3341 return 1000;
3342
3343 return targetm.address_cost (x);
3344 }
3345
3346 /* If the target doesn't override, compute the cost as with arithmetic. */
3347
3348 int
3349 default_address_cost (rtx x)
3350 {
3351 return rtx_cost (x, MEM);
3352 }
3353 \f
3354
3355 unsigned HOST_WIDE_INT
3356 nonzero_bits (rtx x, enum machine_mode mode)
3357 {
3358 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
3359 }
3360
3361 unsigned int
3362 num_sign_bit_copies (rtx x, enum machine_mode mode)
3363 {
3364 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
3365 }
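
/* Worked examples: nonzero_bits ((and:SI (reg) (const_int 0xff)), SImode)
   is 0xff, since the AND case below intersects the operands' masks; and
   num_sign_bit_copies ((sign_extend:SI (reg:QI)), SImode) is at least
   25, since the extension contributes 32 - 8 extra sign-bit copies.  */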
3366
3367 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3368 It avoids exponential behavior in nonzero_bits1 when X has
3369 identical subexpressions on the first or the second level. */
3370
3371 static unsigned HOST_WIDE_INT
3372 cached_nonzero_bits (rtx x, enum machine_mode mode, rtx known_x,
3373 enum machine_mode known_mode,
3374 unsigned HOST_WIDE_INT known_ret)
3375 {
3376 if (x == known_x && mode == known_mode)
3377 return known_ret;
3378
3379   /* Try to find identical subexpressions.  If found, call
3380 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3381 precomputed value for the subexpression as KNOWN_RET. */
3382
3383 if (ARITHMETIC_P (x))
3384 {
3385 rtx x0 = XEXP (x, 0);
3386 rtx x1 = XEXP (x, 1);
3387
3388 /* Check the first level. */
3389 if (x0 == x1)
3390 return nonzero_bits1 (x, mode, x0, mode,
3391 cached_nonzero_bits (x0, mode, known_x,
3392 known_mode, known_ret));
3393
3394 /* Check the second level. */
3395 if (ARITHMETIC_P (x0)
3396 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3397 return nonzero_bits1 (x, mode, x1, mode,
3398 cached_nonzero_bits (x1, mode, known_x,
3399 known_mode, known_ret));
3400
3401 if (ARITHMETIC_P (x1)
3402 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3403 return nonzero_bits1 (x, mode, x0, mode,
3404 cached_nonzero_bits (x0, mode, known_x,
3405 known_mode, known_ret));
3406 }
3407
3408 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
3409 }
3410
3411 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3412 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3413 is less useful. We can't allow both, because that results in exponential
3414 run time recursion. There is a nullstone testcase that triggered
3415 this. This macro avoids accidental uses of num_sign_bit_copies. */
3416 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
3417
3418 /* Given an expression, X, compute which bits in X can be nonzero.
3419 We don't care about bits outside of those defined in MODE.
3420
3421    For most X this is simply GET_MODE_MASK (MODE), but if X is
3422 an arithmetic operation, we can do better. */
3423
3424 static unsigned HOST_WIDE_INT
3425 nonzero_bits1 (rtx x, enum machine_mode mode, rtx known_x,
3426 enum machine_mode known_mode,
3427 unsigned HOST_WIDE_INT known_ret)
3428 {
3429 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
3430 unsigned HOST_WIDE_INT inner_nz;
3431 enum rtx_code code;
3432 unsigned int mode_width = GET_MODE_BITSIZE (mode);
3433
3434 /* For floating-point values, assume all bits are needed. */
3435 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode))
3436 return nonzero;
3437
3438 /* If X is wider than MODE, use its mode instead. */
3439 if (GET_MODE_BITSIZE (GET_MODE (x)) > mode_width)
3440 {
3441 mode = GET_MODE (x);
3442 nonzero = GET_MODE_MASK (mode);
3443 mode_width = GET_MODE_BITSIZE (mode);
3444 }
3445
3446 if (mode_width > HOST_BITS_PER_WIDE_INT)
3447 /* Our only callers in this case look for single bit values. So
3448 just return the mode mask. Those tests will then be false. */
3449 return nonzero;
3450
3451 #ifndef WORD_REGISTER_OPERATIONS
3452 /* If MODE is wider than X, but both are a single word for both the host
3453 and target machines, we can compute this from which bits of the
3454 object might be nonzero in its own mode, taking into account the fact
3455 that on many CISC machines, accessing an object in a wider mode
3456 causes the high-order bits to become undefined. So they are
3457 not known to be zero. */
3458
3459 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
3460 && GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD
3461 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
3462 && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x)))
3463 {
3464 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
3465 known_x, known_mode, known_ret);
3466 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
3467 return nonzero;
3468 }
3469 #endif
3470
3471 code = GET_CODE (x);
3472 switch (code)
3473 {
3474 case REG:
3475 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
3476 /* If pointers extend unsigned and this is a pointer in Pmode, say that
3477 all the bits above ptr_mode are known to be zero. */
3478 if (POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
3479 && REG_POINTER (x))
3480 nonzero &= GET_MODE_MASK (ptr_mode);
3481 #endif
3482
3483 /* Include declared information about alignment of pointers. */
3484 /* ??? We don't properly preserve REG_POINTER changes across
3485 pointer-to-integer casts, so we can't trust it except for
3486 things that we know must be pointers. See execute/960116-1.c. */
3487 if ((x == stack_pointer_rtx
3488 || x == frame_pointer_rtx
3489 || x == arg_pointer_rtx)
3490 && REGNO_POINTER_ALIGN (REGNO (x)))
3491 {
3492 unsigned HOST_WIDE_INT alignment
3493 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
3494
3495 #ifdef PUSH_ROUNDING
3496 /* If PUSH_ROUNDING is defined, it is possible for the
3497 stack to be momentarily aligned only to that amount,
3498 so we pick the least alignment. */
3499 if (x == stack_pointer_rtx && PUSH_ARGS)
3500 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
3501 alignment);
3502 #endif
3503
3504 nonzero &= ~(alignment - 1);
3505 }
3506
3507 {
3508 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
3509 rtx new = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
3510 known_mode, known_ret,
3511 &nonzero_for_hook);
3512
3513 if (new)
3514 nonzero_for_hook &= cached_nonzero_bits (new, mode, known_x,
3515 known_mode, known_ret);
3516
3517 return nonzero_for_hook;
3518 }
3519
3520 case CONST_INT:
3521 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
3522 /* If X is negative in MODE, sign-extend the value. */
3523 if (INTVAL (x) > 0 && mode_width < BITS_PER_WORD
3524 && 0 != (INTVAL (x) & ((HOST_WIDE_INT) 1 << (mode_width - 1))))
3525 return (INTVAL (x) | ((HOST_WIDE_INT) (-1) << mode_width));
3526 #endif
3527
3528 return INTVAL (x);
3529
3530 case MEM:
3531 #ifdef LOAD_EXTEND_OP
3532 /* In many, if not most, RISC machines, reading a byte from memory
3533 zeros the rest of the register. Noticing that fact saves a lot
3534 of extra zero-extends. */
3535 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
3536 nonzero &= GET_MODE_MASK (GET_MODE (x));
3537 #endif
3538 break;
3539
3540 case EQ: case NE:
3541 case UNEQ: case LTGT:
3542 case GT: case GTU: case UNGT:
3543 case LT: case LTU: case UNLT:
3544 case GE: case GEU: case UNGE:
3545 case LE: case LEU: case UNLE:
3546 case UNORDERED: case ORDERED:
3547 /* If this produces an integer result, we know which bits are set.
3548 Code here used to clear bits outside the mode of X, but that is
3549 now done above. */
3550 /* Mind that MODE is the mode the caller wants to look at this
3551 operation in, and not the actual operation mode. We can wind
3552 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
3553 that describes the results of a vector compare. */
3554 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
3555 && mode_width <= HOST_BITS_PER_WIDE_INT)
3556 nonzero = STORE_FLAG_VALUE;
3557 break;
3558
3559 case NEG:
3560 #if 0
3561 /* Disabled to avoid exponential mutual recursion between nonzero_bits
3562 and num_sign_bit_copies. */
3563 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
3564 == GET_MODE_BITSIZE (GET_MODE (x)))
3565 nonzero = 1;
3566 #endif
3567
3568 if (GET_MODE_SIZE (GET_MODE (x)) < mode_width)
3569 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
3570 break;
3571
3572 case ABS:
3573 #if 0
3574 /* Disabled to avoid exponential mutual recursion between nonzero_bits
3575 and num_sign_bit_copies. */
3576 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
3577 == GET_MODE_BITSIZE (GET_MODE (x)))
3578 nonzero = 1;
3579 #endif
3580 break;
3581
3582 case TRUNCATE:
3583 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
3584 known_x, known_mode, known_ret)
3585 & GET_MODE_MASK (mode));
3586 break;
3587
3588 case ZERO_EXTEND:
3589 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
3590 known_x, known_mode, known_ret);
3591 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
3592 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
3593 break;
3594
3595 case SIGN_EXTEND:
3596       /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
3597 	 Otherwise, show that all the bits in the outer mode but not in the
3598 	 inner mode may be nonzero.  */
3599 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
3600 known_x, known_mode, known_ret);
3601 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
3602 {
3603 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
3604 if (inner_nz
3605 & (((HOST_WIDE_INT) 1
3606 << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))))
3607 inner_nz |= (GET_MODE_MASK (mode)
3608 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
3609 }
3610
3611 nonzero &= inner_nz;
3612 break;
3613
3614 case AND:
3615 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
3616 known_x, known_mode, known_ret)
3617 & cached_nonzero_bits (XEXP (x, 1), mode,
3618 known_x, known_mode, known_ret);
3619 break;
3620
3621 case XOR: case IOR:
3622 case UMIN: case UMAX: case SMIN: case SMAX:
3623 {
3624 unsigned HOST_WIDE_INT nonzero0 =
3625 cached_nonzero_bits (XEXP (x, 0), mode,
3626 known_x, known_mode, known_ret);
3627
3628 	/* Don't call nonzero_bits a second time if it cannot change
3629 	   anything.  */
3630 if ((nonzero & nonzero0) != nonzero)
3631 nonzero &= nonzero0
3632 | cached_nonzero_bits (XEXP (x, 1), mode,
3633 known_x, known_mode, known_ret);
3634 }
3635 break;
3636
3637 case PLUS: case MINUS:
3638 case MULT:
3639 case DIV: case UDIV:
3640 case MOD: case UMOD:
3641 /* We can apply the rules of arithmetic to compute the number of
3642 high- and low-order zero bits of these operations. We start by
3643 computing the width (position of the highest-order nonzero bit)
3644 and the number of low-order zero bits for each value. */
3645 {
3646 unsigned HOST_WIDE_INT nz0 =
3647 cached_nonzero_bits (XEXP (x, 0), mode,
3648 known_x, known_mode, known_ret);
3649 unsigned HOST_WIDE_INT nz1 =
3650 cached_nonzero_bits (XEXP (x, 1), mode,
3651 known_x, known_mode, known_ret);
3652 int sign_index = GET_MODE_BITSIZE (GET_MODE (x)) - 1;
3653 int width0 = floor_log2 (nz0) + 1;
3654 int width1 = floor_log2 (nz1) + 1;
3655 int low0 = floor_log2 (nz0 & -nz0);
3656 int low1 = floor_log2 (nz1 & -nz1);
3657 HOST_WIDE_INT op0_maybe_minusp
3658 = (nz0 & ((HOST_WIDE_INT) 1 << sign_index));
3659 HOST_WIDE_INT op1_maybe_minusp
3660 = (nz1 & ((HOST_WIDE_INT) 1 << sign_index));
3661 unsigned int result_width = mode_width;
3662 int result_low = 0;
3663
3664 switch (code)
3665 {
3666 case PLUS:
3667 result_width = MAX (width0, width1) + 1;
3668 result_low = MIN (low0, low1);
3669 break;
3670 case MINUS:
3671 result_low = MIN (low0, low1);
3672 break;
3673 case MULT:
3674 result_width = width0 + width1;
3675 result_low = low0 + low1;
3676 break;
3677 case DIV:
3678 if (width1 == 0)
3679 break;
3680 if (! op0_maybe_minusp && ! op1_maybe_minusp)
3681 result_width = width0;
3682 break;
3683 case UDIV:
3684 if (width1 == 0)
3685 break;
3686 result_width = width0;
3687 break;
3688 case MOD:
3689 if (width1 == 0)
3690 break;
3691 if (! op0_maybe_minusp && ! op1_maybe_minusp)
3692 result_width = MIN (width0, width1);
3693 result_low = MIN (low0, low1);
3694 break;
3695 case UMOD:
3696 if (width1 == 0)
3697 break;
3698 result_width = MIN (width0, width1);
3699 result_low = MIN (low0, low1);
3700 break;
3701 default:
3702 gcc_unreachable ();
3703 }
3704
3705 if (result_width < mode_width)
3706 nonzero &= ((HOST_WIDE_INT) 1 << result_width) - 1;
3707
3708 if (result_low > 0)
3709 nonzero &= ~(((HOST_WIDE_INT) 1 << result_low) - 1);
3710
3711 #ifdef POINTERS_EXTEND_UNSIGNED
3712 /* If pointers extend unsigned and this is an addition or subtraction
3713 to a pointer in Pmode, all the bits above ptr_mode are known to be
3714 zero. */
3715 if (POINTERS_EXTEND_UNSIGNED > 0 && GET_MODE (x) == Pmode
3716 && (code == PLUS || code == MINUS)
3717 && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
3718 nonzero &= GET_MODE_MASK (ptr_mode);
3719 #endif
3720 }
3721 break;
3722
3723 case ZERO_EXTRACT:
3724 if (GET_CODE (XEXP (x, 1)) == CONST_INT
3725 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
3726 nonzero &= ((HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
3727 break;
3728
3729 case SUBREG:
3730 /* If this is a SUBREG formed for a promoted variable that has
3731 been zero-extended, we know that at least the high-order bits
3732 are zero, though others might be too. */
3733
3734 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
3735 nonzero = GET_MODE_MASK (GET_MODE (x))
3736 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
3737 known_x, known_mode, known_ret);
3738
3739 /* If the inner mode is a single word for both the host and target
3740 machines, we can compute this from which bits of the inner
3741 object might be nonzero. */
3742 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD
3743 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
3744 <= HOST_BITS_PER_WIDE_INT))
3745 {
3746 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
3747 known_x, known_mode, known_ret);
3748
3749 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
3750 /* If this is a typical RISC machine, we only have to worry
3751 about the way loads are extended. */
3752 if ((LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
3753 ? (((nonzero
3754 & (((unsigned HOST_WIDE_INT) 1
3755 << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1))))
3756 != 0))
3757 : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND)
3758 || !MEM_P (SUBREG_REG (x)))
3759 #endif
3760 {
3761 /* On many CISC machines, accessing an object in a wider mode
3762 causes the high-order bits to become undefined. So they are
3763 not known to be zero. */
3764 if (GET_MODE_SIZE (GET_MODE (x))
3765 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
3766 nonzero |= (GET_MODE_MASK (GET_MODE (x))
3767 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
3768 }
3769 }
3770 break;
3771
3772 case ASHIFTRT:
3773 case LSHIFTRT:
3774 case ASHIFT:
3775 case ROTATE:
3776 /* The nonzero bits are in two classes: any bits within MODE
3777 that aren't in GET_MODE (x) are always significant. The rest of the
3778 nonzero bits are those that are significant in the operand of
3779 the shift when shifted the appropriate number of bits. This
3780 shows that high-order bits are cleared by the right shift and
3781 low-order bits by left shifts. */
3782 if (GET_CODE (XEXP (x, 1)) == CONST_INT
3783 && INTVAL (XEXP (x, 1)) >= 0
3784 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
3785 {
3786 enum machine_mode inner_mode = GET_MODE (x);
3787 unsigned int width = GET_MODE_BITSIZE (inner_mode);
3788 int count = INTVAL (XEXP (x, 1));
3789 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
3790 unsigned HOST_WIDE_INT op_nonzero =
3791 cached_nonzero_bits (XEXP (x, 0), mode,
3792 known_x, known_mode, known_ret);
3793 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
3794 unsigned HOST_WIDE_INT outer = 0;
3795
3796 if (mode_width > width)
3797 outer = (op_nonzero & nonzero & ~mode_mask);
3798
3799 if (code == LSHIFTRT)
3800 inner >>= count;
3801 else if (code == ASHIFTRT)
3802 {
3803 inner >>= count;
3804
3805 /* If the sign bit may have been nonzero before the shift, we
3806 need to mark all the places it could have been copied to
3807 by the shift as possibly nonzero. */
3808 if (inner & ((HOST_WIDE_INT) 1 << (width - 1 - count)))
3809 inner |= (((HOST_WIDE_INT) 1 << count) - 1) << (width - count);
3810 }
3811 else if (code == ASHIFT)
3812 inner <<= count;
3813 else
3814 inner = ((inner << (count % width)
3815 | (inner >> (width - (count % width)))) & mode_mask);
3816
3817 nonzero &= (outer | inner);
3818 }
3819 break;
3820
3821 case FFS:
3822 case POPCOUNT:
3823 /* This is at most the number of bits in the mode. */
3824 nonzero = ((HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
3825 break;
3826
3827 case CLZ:
3828 /* If CLZ has a known value at zero, then the nonzero bits are
3829 that value, plus the number of bits in the mode minus one. */
3830 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
3831 nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
3832 else
3833 nonzero = -1;
3834 break;
3835
3836 case CTZ:
3837 /* If CTZ has a known value at zero, then the nonzero bits are
3838 that value, plus the number of bits in the mode minus one. */
3839 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
3840 nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
3841 else
3842 nonzero = -1;
3843 break;
3844
3845 case PARITY:
3846 nonzero = 1;
3847 break;
3848
3849 case IF_THEN_ELSE:
3850 {
3851 unsigned HOST_WIDE_INT nonzero_true =
3852 cached_nonzero_bits (XEXP (x, 1), mode,
3853 known_x, known_mode, known_ret);
3854
3855 	/* Don't call nonzero_bits a second time if it cannot change
3856 	   anything.  */
3857 if ((nonzero & nonzero_true) != nonzero)
3858 nonzero &= nonzero_true
3859 | cached_nonzero_bits (XEXP (x, 2), mode,
3860 known_x, known_mode, known_ret);
3861 }
3862 break;
3863
3864 default:
3865 break;
3866 }
3867
3868 return nonzero;
3869 }
3870
3871 /* See the macro definition above. */
3872 #undef cached_num_sign_bit_copies
3873
3874 \f
3875 /* The function cached_num_sign_bit_copies is a wrapper around
3876 num_sign_bit_copies1. It avoids exponential behavior in
3877 num_sign_bit_copies1 when X has identical subexpressions on the
3878 first or the second level. */
3879
3880 static unsigned int
3881 cached_num_sign_bit_copies (rtx x, enum machine_mode mode, rtx known_x,
3882 enum machine_mode known_mode,
3883 unsigned int known_ret)
3884 {
3885 if (x == known_x && mode == known_mode)
3886 return known_ret;
3887
3888   /* Try to find identical subexpressions.  If found, call
3889 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
3890 the precomputed value for the subexpression as KNOWN_RET. */
3891
3892 if (ARITHMETIC_P (x))
3893 {
3894 rtx x0 = XEXP (x, 0);
3895 rtx x1 = XEXP (x, 1);
3896
3897 /* Check the first level. */
3898 if (x0 == x1)
3899 return
3900 num_sign_bit_copies1 (x, mode, x0, mode,
3901 cached_num_sign_bit_copies (x0, mode, known_x,
3902 known_mode,
3903 known_ret));
3904
3905 /* Check the second level. */
3906 if (ARITHMETIC_P (x0)
3907 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3908 return
3909 num_sign_bit_copies1 (x, mode, x1, mode,
3910 cached_num_sign_bit_copies (x1, mode, known_x,
3911 known_mode,
3912 known_ret));
3913
3914 if (ARITHMETIC_P (x1)
3915 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3916 return
3917 num_sign_bit_copies1 (x, mode, x0, mode,
3918 cached_num_sign_bit_copies (x0, mode, known_x,
3919 known_mode,
3920 known_ret));
3921 }
3922
3923 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
3924 }

/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE; if MODE is
   VOIDmode, X will be used in its own mode.  The returned value will always
   be between 1 and the number of bits in MODE.  */

static unsigned int
num_sign_bit_copies1 (rtx x, enum machine_mode mode, rtx known_x,
		      enum machine_mode known_mode,
		      unsigned int known_ret)
{
  enum rtx_code code = GET_CODE (x);
  unsigned int bitwidth = GET_MODE_BITSIZE (mode);
  int num0, num1, result;
  unsigned HOST_WIDE_INT nonzero;

  /* If we weren't given a mode, use the mode of X.  If the mode is still
     VOIDmode, we don't know anything.  Likewise if one of the modes is
     floating-point.  */

  if (mode == VOIDmode)
    mode = GET_MODE (x);

  if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x)))
    return 1;

  /* For a smaller object, just ignore the high bits.  */
  if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
    {
      num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
					 known_x, known_mode, known_ret);
      return MAX (1,
		  num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth));
    }

  if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x)))
    {
#ifndef WORD_REGISTER_OPERATIONS
      /* If this machine does not do all register operations on the entire
	 register and MODE is wider than the mode of X, we can say nothing
	 at all about the high-order bits.  */
      return 1;
#else
      /* Likewise on machines that do, if the mode of the object is smaller
	 than a word and loads of that size don't sign extend, we can say
	 nothing about the high order bits.  */
      if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
#ifdef LOAD_EXTEND_OP
	  && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
#endif
	  )
	return 1;
#endif
    }

  switch (code)
    {
    case REG:

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend signed and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be sign bit copies.  */
      if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode && mode == Pmode
	  && REG_POINTER (x))
	return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1;
#endif

      {
	unsigned int copies_for_hook = 1, copies = 1;
	rtx new = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
						     known_mode, known_ret,
						     &copies_for_hook);

	if (new)
	  copies = cached_num_sign_bit_copies (new, mode, known_x,
					       known_mode, known_ret);

	if (copies > 1 || copies_for_hook > 1)
	  return MAX (copies, copies_for_hook);

	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* Some RISC machines sign-extend all loads smaller than a word.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
	return MAX (1, ((int) bitwidth
			- (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1));
#endif
      break;

    case CONST_INT:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = INTVAL (x) & GET_MODE_MASK (mode);
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

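    /* Worked example for the CONST_INT case above (illustrative): in SImode,
       x = -4 is ...11111100, so NONZERO becomes (~(-4)) & 0xffffffff = 3 and
       the result is 32 - floor_log2 (3) - 1 = 30: the top 30 bits are all
       copies of the sign bit.  */
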
    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
	 and we are looking at it in a wider mode, we know that at least the
	 high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					     known_x, known_mode, known_ret);
	  return MAX ((int) bitwidth
		      - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1,
		      num0);
	}

      /* For a smaller object, just ignore the high bits.  */
      if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
					     known_x, known_mode, known_ret);
	  return MAX (1, (num0
			  - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
				   - bitwidth)));
	}

#ifdef WORD_REGISTER_OPERATIONS
#ifdef LOAD_EXTEND_OP
      /* For paradoxical SUBREGs on machines where all register operations
	 affect the entire register, just look inside.  Note that we are
	 passing MODE to the recursive call, so the number of sign bit copies
	 will remain relative to that mode, not the inner mode.  */

      /* This works only if loads sign extend.  Otherwise, if we get a
	 reload for the inner part, it may be loaded from the stack, and
	 then we lose all sign bit copies that existed before the store
	 to the stack.  */

      if ((GET_MODE_SIZE (GET_MODE (x))
	   > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
	  && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
	  && MEM_P (SUBREG_REG (x)))
	return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					   known_x, known_mode, known_ret);
#endif
#endif
      break;

    case SIGN_EXTRACT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;

    case SIGN_EXTEND:
      return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
	      + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
					    known_x, known_mode, known_ret));

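    /* Worked example for SIGN_EXTEND (illustrative): for
       x = (sign_extend:SI (reg:QI 58)) the result is
       32 - 8 + num_sign_bit_copies (reg:QI 58), i.e. at least 25, since the
       extension makes the top 24 bits copies of the QImode sign bit.  */
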
    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
					 known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
				    - bitwidth)));

    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);

    case ROTATE: case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
	 of sign bit copies, we can just subtract that amount from the
	 number.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
	{
	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					     known_x, known_mode, known_ret);
	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
	}
      break;

    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
	 is known to be positive, the number of sign bit copies is the
	 same as that of the input.  Finally, if the input has just one bit
	 that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
	return bitwidth;

      if (num0 > 1
	  && (((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
	num0--;

      return num0;

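    /* Worked example for NEG (illustrative): if nonzero_bits says the
       operand can only be 0 or 1 (NONZERO == 1), then (neg:SI ...) is
       either 0 or -1, and every one of the 32 bits is a copy of the sign
       bit, which is why BITWIDTH is returned.  */
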
    case IOR: case AND: case XOR:
    case SMIN: case SMAX: case UMIN: case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
	 MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);

    case PLUS: case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
	 if we are subtracting 1 from a positive number, there will not
	 be such a carry.  Furthermore, if the positive number is known to
	 be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero = nonzero_bits (XEXP (x, 0), mode);
	  if ((((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
	    return (nonzero == 1 || nonzero == 0 ? bitwidth
		    : bitwidth - floor_log2 (nonzero) - 1);
	}

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

#ifdef POINTERS_EXTEND_UNSIGNED
      /* If pointers extend signed and this is an addition or subtraction
	 to a pointer in Pmode, all the bits above ptr_mode are known to be
	 sign bit copies.  */
      if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
	  && (code == PLUS || code == MINUS)
	  && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
	result = MAX ((int) (GET_MODE_BITSIZE (Pmode)
			     - GET_MODE_BITSIZE (ptr_mode) + 1),
		      result);
#endif
      return result;

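    /* Worked example for PLUS/MINUS (illustrative): if both SImode operands
       have at least 10 sign-bit copies, each lies in [-2^22, 2^22 - 1], so
       their sum lies in [-2^23, 2^23 - 2] and still has at least
       MIN (10, 10) - 1 = 9 sign-bit copies; the -1 accounts for the
       possible carry into the sign-copy region.  */
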
    case MULT:
      /* The number of bits of the product is the sum of the number of
	 bits of both terms.  However, unless one of the terms is known
	 to be positive, we must allow for an additional bit since negating
	 a negative number can remove one sign bit copy.  */

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (((nonzero_bits (XEXP (x, 0), mode)
		    & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
		  && ((nonzero_bits (XEXP (x, 1), mode)
		       & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))))
	result--;

      return MAX (1, result);

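    /* Worked example for MULT (illustrative): with BITWIDTH == 32,
       NUM0 == 20 and NUM1 == 15 the operands fit in 13 and 18 signed bits,
       so RESULT starts at 20 + 15 - 32 = 3 copies (a 30-bit product).
       Unless one operand is known nonnegative, one copy is subtracted:
       the extreme case (-2^12) * (-2^17) = 2^29 needs 31 bits, leaving
       only 2 copies.  */
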
    case UDIV:
      /* The result must be <= the first operand.  If the first operand
	 has the high bit set, we know nothing about the number of sign
	 bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
		& ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);

    case UMOD:
      /* The result must be <= the second operand.  */
      return cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;

    case ASHIFTRT:
      /* An arithmetic right shift by a constant adds that many bits equal
	 to the sign bit, up to the full bit width.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) > 0)
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;

    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (GET_CODE (XEXP (x, 1)) != CONST_INT
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth)
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));

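    /* Worked example for the shift cases (illustrative): any SImode value
       has at least 1 sign-bit copy, so (ashiftrt:SI x (const_int 24)) has
       at least MIN (32, 1 + 24) = 25, while (ashift:SI x (const_int 24))
       discards 24 of the copies, leaving MAX (1, NUM0 - 24).  */
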
    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);

    case EQ: case NE: case GE: case GT: case LE: case LT:
    case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* The result is STORE_FLAG_VALUE or zero.  If STORE_FLAG_VALUE is
	 negative, take its 1's complement and remask.  Then see how many
	 zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

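    /* Worked example for the comparison codes (illustrative): with the
       common STORE_FLAG_VALUE of 1, NONZERO stays 1 and the result is
       BITWIDTH - floor_log2 (1) - 1 = BITWIDTH - 1: every bit but the
       lowest is a copy of the (zero) sign bit.  */
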
    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     the count of those bits is the number of sign bit copies.  If we
     can't safely compute the mask for this mode, return the minimum
     safe answer of 1.  */

  bitwidth = GET_MODE_BITSIZE (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}

/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (set)
		return 0;
	      set = x;
	    }
	}
      if (!set)
	return 0;
    }
  else
    return 0;

  cost = rtx_cost (SET_SRC (set), SET);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
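
/* Illustrative use (hypothetical caller, not from this file): a pass
   comparing two candidate insns might do

     if (insn_rtx_cost (PATTERN (a)) > insn_rtx_cost (PATTERN (b)))
       ...

   A multi-set PARALLEL or a pattern containing no SET yields 0, so
   callers must treat 0 as "cost unknown" rather than "free".  */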

/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonicalizing
   it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx prev = insn;
  rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  enum machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests that find_comparison_args in cse.c performs, as a
     function of STORE_FLAG_VALUE.  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

#ifdef HAVE_cc0
      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}
#endif

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      if ((prev = prev_nonnote_insn (prev)) == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;

      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */

	  if ((GET_CODE (SET_SRC (set)) == COMPARE
	       || (((code == NE
		     || (code == LT
			 && GET_MODE_CLASS (inner_mode) == MODE_INT
			 && (GET_MODE_BITSIZE (inner_mode)
			     <= HOST_BITS_PER_WIDE_INT)
			 && (STORE_FLAG_VALUE
			     & ((HOST_WIDE_INT) 1
				<< (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == LT
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set))))
	      && (((GET_MODE_CLASS (mode) == MODE_CC)
		   == (GET_MODE_CLASS (inner_mode) == MODE_CC))
		  || mode == VOIDmode || inner_mode == VOIDmode))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && (GET_MODE_BITSIZE (inner_mode)
			     <= HOST_BITS_PER_WIDE_INT)
			 && GET_MODE_CLASS (inner_mode) == MODE_INT
			 && (STORE_FLAG_VALUE
			     & ((HOST_WIDE_INT) 1
				<< (GET_MODE_BITSIZE (inner_mode) - 1))))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set))
		   && (((GET_MODE_CLASS (mode) == MODE_CC)
			== (GET_MODE_CLASS (inner_mode) == MODE_CC))
		       || mode == VOIDmode || inner_mode == VOIDmode))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else
	    break;
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
	case GE:
	  if ((HOST_WIDE_INT) (const_val & max_val)
	      != (((HOST_WIDE_INT) 1
		   << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
	    code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
	  break;

	default:
	  break;
	}
    }
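
  /* Worked example for the switch above (illustrative): with SImode
     operands, (le (reg) (const_int 4)) becomes (lt (reg) (const_int 5)):
     safe because 4 is not the largest positive SImode value, so adding 1
     cannot overflow.  Similarly (geu (reg) (const_int 7)) becomes
     (gtu (reg) (const_int 6)).  */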

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}

/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare to a CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}
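
/* Illustrative sketch (hypothetical insn): for a jump whose pc-set is

     (set (pc) (if_then_else (eq (reg:SI 60) (const_int 0))
			     (pc)
			     (label_ref 23)))

   the taken branch sits in the "else" arm, so REVERSE is set and the
   returned condition is the canonicalized
   (ne (reg:SI 60) (const_int 0)).  */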

/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  enum machine_mode mode, in_mode;

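  /* Note: the increment of the outer loop below intentionally uses MODE.
     When the inner loop exits, MODE == IN_MODE, so GET_MODE_WIDER_MODE
     (mode) advances IN_MODE to the next wider integral mode.  */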
  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
	 mode = GET_MODE_WIDER_MODE (mode))
      {
	enum machine_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode) == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
	  {
	    enum machine_mode wider = GET_MODE_WIDER_MODE (i);

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_BITSIZE (wider) - GET_MODE_BITSIZE (i);
	  }
      }
}
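
/* Worked example (hypothetical target): if TARGET_MODE_REP_EXTENDED
   (QImode, HImode) returns SIGN_EXTEND and HImode-to-SImode likewise,
   then num_sign_bit_copies_in_rep[SImode][QImode] accumulates
   (16 - 8) + (32 - 16) = 24: a QImode value living in an SImode register
   must have its top 24 bits equal to the QImode sign bit.  */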

/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (enum machine_mode mode, rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
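
/* Worked example (illustrative): truncating an SImode X to QImode is
   already done when X has at least 24 + 1 = 25 sign-bit copies on a
   target where num_sign_bit_copies_in_rep[SImode][QImode] == 24, as in
   the sketch above; switching modes then loses no information.  */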
\f
/* Initialize non_rtx_starting_operands, which is used to speed up
   for_each_rtx.  */
void
init_rtlanal (void)
{
  int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      const char *format = GET_RTX_FORMAT (i);
      const char *first = strpbrk (format, "eEV");
      non_rtx_starting_operands[i] = first ? first - format : -1;
    }

  init_num_sign_bit_copies_in_rep ();
}
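
/* Worked example (illustrative): PLUS has RTX format "ee", so its entry
   in non_rtx_starting_operands is 0, while CONST_INT has format "w" and
   gets -1; for_each_rtx uses -1 to skip such leaves without scanning
   their operands.  */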
\f
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return GET_CODE (x) == CONST_DOUBLE;
}