/* Analyze RTL for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011, 2012 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int rtx_referenced_p_1 (rtx *, void *);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
                                                   const_rtx, enum machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
                                             const_rtx, enum machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode, const_rtx,
                                                enum machine_mode, unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
                                          enum machine_mode, unsigned int);
/* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
   -1 if a code has no such operand.  */
static int non_rtx_starting_operands[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
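
/* Illustrative example (ours, not part of the original source): on a
   target where TARGET_MODE_REP_EXTENDED (SImode, DImode) == SIGN_EXTEND,
   truncating a DImode value to SImode is a pure mode switch only when
   the high-order DImode bits are already copies of the SImode sign bit;
   the table above records how many such copies are required for each
   SOURCE/DESTINATION pair.  */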
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.
   MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
   whether nonzero is returned for unaligned memory accesses on strict
   alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       enum machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  if (STRICT_ALIGNMENT
      && unaligned_mems
      && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;
#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (host_integerp (DECL_SIZE_UNIT (decl), 0)
                         ? tree_low_cst (DECL_SIZE_UNIT (decl), 0)
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer,
           with the proper remainder modulo the mode size if we are
           considering unaligned memory references.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
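
/* Usage sketch (ours, not from the original file): an address such as
   (plus (reg fp) (const_int 8)) is recognized as a stack reference and
   rtx_addr_can_trap_p returns 0, while a dereference through an
   arbitrary pseudo, e.g. (reg 130), conservatively returns 1.  */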
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      if (CONST_INT_P (XEXP (x, 1)))
        return nonzero_address_p (XEXP (x, 0));
      /* Handle PIC references.  */
      else if (XEXP (x, 0) == pic_offset_table_rtx
               && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
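
/* Example (ours): for (const (plus (symbol_ref "x") (const_int 4)))
   get_integer_term returns 4, for (const (minus (symbol_ref "x")
   (const_int 4))) it returns -4, and anything without an obvious
   integer term yields 0.  */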
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
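
/* Example (ours): split_const on (const (plus (symbol_ref "x")
   (const_int 8))) stores (symbol_ref "x") in *BASE_OUT and
   (const_int 8) in *OFFSET_OUT; for any other X it stores X itself
   and const0_rtx.  */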
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_UNSIGNED_P (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == XEXP (in, 0);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    CASE_CONST_ANY:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const_rtx beg, const_rtx end)
{
  rtx p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  const_rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Internals of reg_set_between_p.  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}
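
/* Usage sketch (ours): for an insn whose pattern is
   (set (reg 65) (plus (reg 65) (const_int 1))),
   reg_set_p on (reg 65) is nonzero; a hard register invalidated by a
   CALL_INSN is likewise reported as set.  */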
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const_rtx start, const_rtx end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *) data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}
/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    IOR_HARD_REG_SET (*pset, call_used_reg_set);
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* A for_each_rtx subroutine of record_hard_reg_uses.  */
static int
record_hard_reg_uses_1 (rtx *px, void *data)
{
  rtx x = *px;
  HARD_REG_SET *pused = (HARD_REG_SET *) data;

  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    {
      int nregs = hard_regno_nregs[REGNO (x)][GET_MODE (x)];
      while (nregs-- > 0)
        SET_HARD_REG_BIT (*pused, REGNO (x) + nregs);
    }
  return 0;
}

/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  for_each_rtx (px, record_hard_reg_uses_1, data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const_rtx insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
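
/* Example (ours): (set (reg:SI 60) (reg:SI 60)) is a no-op set, while
   (set (reg:SI 60) (reg:SI 61)) and self-assignments through volatile
   memory are not.  */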
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const_rtx insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return the last thing that X was assigned from before *PINSN.  If VALID_TO
   is not NULL_RTX then verify that the object is not modified up to VALID_TO.
   If the object was modified, if we hit a partial assignment to X, or hit a
   CODE_LABEL first, return X.  If we found an assignment, update *PINSN to
   point to it.  ALLOW_HWREG is set to 1 if hardware registers are allowed to
   be the src.  */

rtx
find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
{
  rtx p;

  for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
       p = PREV_INSN (p))
    if (INSN_P (p))
      {
        rtx set = single_set (p);
        rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);

        if (set && rtx_equal_p (x, SET_DEST (set)))
          {
            rtx src = SET_SRC (set);

            if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
              src = XEXP (note, 0);

            if ((valid_to == NULL_RTX
                 || ! modified_between_p (src, PREV_INSN (p), valid_to))
                /* Reject hard registers because we don't usually want
                   to use them; we'd rather use a pseudo.  */
                && (! (REG_P (src)
                       && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
              {
                *pinsn = p;
                return src;
              }
          }

        /* If set in non-simple way, we don't have a value.  */
        if (reg_set_p (x, p))
          break;
      }

  return x;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
           || x_regno == ARG_POINTER_REGNUM
#endif
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return 1;
        }
    }
  return 0;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

  If the item being stored in or clobbered is a SUBREG of a hard register,
  the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
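
/* Usage sketch (ours): find_all_hard_reg_sets above drives this with
   note_stores (PATTERN (insn), record_hard_reg_sets, pset), so that FUN
   accumulates every hard register written by INSN.  */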
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const_rtx insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_HARD_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_HARD_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        *loc = XEXP (*loc, 1);
      else
        loc = &XEXP (*loc, 1);
    }
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

int
in_expr_list_p (const_rtx listp, const_rtx node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return 1;

  return 0;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx *listp)
{
  rtx temp = *listp;
  rtx prev = NULL_RTX;

  while (temp)
    {
      if (node == XEXP (temp, 0))
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = XEXP (temp, 1);
          else
            *listp = XEXP (temp, 1);

          return;
        }

      prev = temp;
      temp = XEXP (temp, 1);
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state instructions, and thus no
   instructions should be moved or combined across them.  This includes
   only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      /* case TRAP_IF: This isn't clear yet.  */
      return 1;

    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case VAR_LOCATION:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case SET:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      /* case TRAP_IF: This isn't clear yet.  */
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (side_effects_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      return targetm.unspec_may_trap_p (x, flags);

    case ASM_INPUT:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
          && MEM_VOLATILE_P (x)
          && XEXP (x, 0) == stack_pointer_rtx)
        return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
             reference; moving it out of context such as when moving code
             when optimizing, might cause its address to become invalid.  */
          code_changed
          || !MEM_NOTRAP_P (x))
        {
          HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
          return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
                                        GET_MODE (x), code_changed);
        }
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (GET_MODE (x)))
        return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
        return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
        return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
         certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
        break;
      /* ??? There is no machine independent way to check for tests that trap
         when COMPARE is used, though many targets do make this distinction.
         For instance, sparc uses CCFPE for compares which generate exceptions
         and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (GET_MODE (x)))
        return 1;
      /* But often the compare has some CC mode, so check operand
         modes as well.  */
      if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
          || HONOR_NANS (GET_MODE (XEXP (x, 1))))
        return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (GET_MODE (x)))
        return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
          || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
        return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
        return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x))
          && flag_trapping_math)
        return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (may_trap_p_1 (XEXP (x, i), flags))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            if (may_trap_p_1 (XVECEXP (x, i, j), flags))
              return 1;
        }
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}
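
/* Example (ours): (div:SI (reg:SI 60) (reg:SI 61)) may trap (division
   by a non-constant), while (plus:SI (reg:SI 60) (const_int 1))
   does not.  */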
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
        if (s->d == 1)
          return *s->ip;
        else
          return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      ((set (reg:SI) (mem:SI (%fp - 7)))
       (set (reg:QI) (mem:QI (%fp - 7))))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case SCRATCH:
    case PC:
    case CC0:
    CASE_CONST_ANY:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      return 0;

    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          if (inequality_comparisons_p (XEXP (x, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (inequality_comparisons_p (XVECEXP (x, i, j)))
              return 1;
        }
    }

  return 0;
}
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.  */

rtx
replace_rtx (rtx x, rtx from, rtx to)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_subreg (GET_MODE (x), new_rtx,
                               GET_MODE (SUBREG_REG (x)),
                               SUBREG_BYTE (x));
          gcc_assert (x);
        }
      else
        SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                        new_rtx, GET_MODE (XEXP (x, 0)));
          gcc_assert (x);
        }
      else
        XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
    }

  return x;
}
/* Replace occurrences of the old label in *X with the new one.
   DATA is a REPLACE_LABEL_DATA containing the old and new labels.  */

int
replace_label (rtx *x, void *data)
{
  rtx l = *x;
  rtx old_label = ((replace_label_data *) data)->r1;
  rtx new_label = ((replace_label_data *) data)->r2;
  bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;

  if (l == NULL_RTX)
    return 0;

  if (GET_CODE (l) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (l))
    {
      rtx c = get_pool_constant (l);
      if (rtx_referenced_p (old_label, c))
        {
          rtx new_c, new_l;
          replace_label_data *d = (replace_label_data *) data;

          /* Create a copy of constant C; replace the label inside
             but do not update LABEL_NUSES because uses in constant pool
             are not counted.  */
          new_c = copy_rtx (c);
          d->update_label_nuses = false;
          for_each_rtx (&new_c, replace_label, data);
          d->update_label_nuses = update_label_nuses;

          /* Add the new constant NEW_C to constant pool and replace
             the old reference to constant by new reference.  */
          new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
          *x = replace_rtx (l, l, new_l);
        }
      return 0;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by for_each_rtx because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
    JUMP_LABEL (l) = new_label;

  if ((GET_CODE (l) == LABEL_REF
       || GET_CODE (l) == INSN_LIST)
      && XEXP (l, 0) == old_label)
    {
      XEXP (l, 0) = new_label;
      if (update_label_nuses)
        {
          ++LABEL_NUSES (new_label);
          --LABEL_NUSES (old_label);
        }
    }

  return 0;
}
/* When *BODY is equal to X or X is directly referenced by *BODY
   return nonzero, thus FOR_EACH_RTX stops traversing and returns nonzero
   too, otherwise FOR_EACH_RTX continues traversing *BODY.  */

static int
rtx_referenced_p_1 (rtx *body, void *x)
{
  rtx y = (rtx) x;

  if (*body == NULL_RTX)
    return y == NULL_RTX;

  /* Return true if a label_ref *BODY refers to label Y.  */
  if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
    return XEXP (*body, 0) == y;

  /* If *BODY is a reference to pool constant traverse the constant.  */
  if (GET_CODE (*body) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (*body))
    return rtx_referenced_p (y, get_pool_constant (*body));

  /* By default, compare the RTL expressions.  */
  return rtx_equal_p (*body, y);
}

/* Return true if X is referenced in BODY.  */

int
rtx_referenced_p (rtx x, rtx body)
{
  return for_each_rtx (&body, rtx_referenced_p_1, x);
}
/* If INSN is a tablejump return true and store the label (before jump table) to
   *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const_rtx insn, rtx *labelp, rtx *tablep)
{
  rtx label, table;

  if (!JUMP_P (insn))
    return false;

  label = JUMP_LABEL (insn);
  if (label != NULL_RTX && !ANY_RETURN_P (label)
      && (table = next_active_insn (label)) != NULL_RTX
      && JUMP_TABLE_DATA_P (table))
    {
      if (labelp)
        *labelp = label;
      if (tablep)
        *tablep = table;
      return true;
    }
  return false;
}
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
                && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
              || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
          && computed_jump_p_1 (XEXP (x, i)))
        return 1;

      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (computed_jump_p_1 (XVECEXP (x, i, j)))
            return 1;
    }

  return 0;
}
/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const_rtx insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
        return 0;

      if (GET_CODE (pat) == PARALLEL)
        {
          int len = XVECLEN (pat, 0);
          int has_use_labelref = 0;

          for (i = len - 1; i >= 0; i--)
            if (GET_CODE (XVECEXP (pat, 0, i)) == USE
                && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
                    == LABEL_REF))
              has_use_labelref = 1;

          if (! has_use_labelref)
            for (i = len - 1; i >= 0; i--)
              if (GET_CODE (XVECEXP (pat, 0, i)) == SET
                  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
                  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
                return 1;
        }
      else if (GET_CODE (pat) == SET
               && SET_DEST (pat) == pc_rtx
               && computed_jump_p_1 (SET_SRC (pat)))
        return 1;
    }
  return 0;
}
/* Optimized loop of for_each_rtx, trying to avoid useless recursive
   calls.  Processes the subexpressions of EXP and passes them to F.  */

static int
for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
{
  int result, i, j;
  const char *format = GET_RTX_FORMAT (GET_CODE (exp));
  rtx *x;

  for (; format[n] != '\0'; n++)
    {
      switch (format[n])
        {
        case 'e':
          /* Call F on X.  */
          x = &XEXP (exp, n);
          result = (*f) (x, data);
          if (result == -1)
            /* Do not traverse sub-expressions.  */
            continue;
          else if (result != 0)
            /* Stop the traversal.  */
            return result;

          if (*x == NULL_RTX)
            /* There are no sub-expressions.  */
            continue;

          i = non_rtx_starting_operands[GET_CODE (*x)];
          if (i >= 0)
            {
              result = for_each_rtx_1 (*x, i, f, data);
              if (result != 0)
                return result;
            }
          break;

        case 'V':
        case 'E':
          if (XVEC (exp, n) == 0)
            continue;
          for (j = 0; j < XVECLEN (exp, n); ++j)
            {
              /* Call F on X.  */
              x = &XVECEXP (exp, n, j);
              result = (*f) (x, data);
              if (result == -1)
                /* Do not traverse sub-expressions.  */
                continue;
              else if (result != 0)
                /* Stop the traversal.  */
                return result;

              if (*x == NULL_RTX)
                /* There are no sub-expressions.  */
                continue;

              i = non_rtx_starting_operands[GET_CODE (*x)];
              if (i >= 0)
                {
                  result = for_each_rtx_1 (*x, i, f, data);
                  if (result != 0)
                    return result;
                }
            }
          break;

        default:
          /* Nothing to do.  */
          break;
        }
    }

  return 0;
}

/* Traverse X via depth-first search, calling F for each
   sub-expression (including X itself).  F is also passed the DATA.
   If F returns -1, do not traverse sub-expressions, but continue
   traversing the rest of the tree.  If F ever returns any other
   nonzero value, stop the traversal, and return the value returned
   by F.  Otherwise, return 0.  This function does not traverse inside
   tree structure that contains RTX_EXPRs, or into sub-expressions
   whose format code is `0' since it is not known whether or not those
   codes are actually RTL.

   This routine is very general, and could (should?) be used to
   implement many of the other routines in this file.  */

int
for_each_rtx (rtx *x, rtx_function f, void *data)
{
  int result;
  int i;

  /* Call F on X.  */
  result = (*f) (x, data);
  if (result == -1)
    /* Do not traverse sub-expressions.  */
    return 0;
  else if (result != 0)
    /* Stop the traversal.  */
    return result;

  if (*x == NULL_RTX)
    /* There are no sub-expressions.  */
    return 0;

  i = non_rtx_starting_operands[GET_CODE (*x)];
  if (i < 0)
    return 0;

  return for_each_rtx_1 (*x, i, f, data);
}

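/* Usage sketch (illustrative only, not part of this file): a
   for_each_rtx callback that counts the REG expressions inside X.
   Returning 0 continues the walk, -1 skips subexpressions, and any
   other value stops the traversal.  The names count_regs_1 and
   count_regs are hypothetical.

     static int
     count_regs_1 (rtx *loc, void *data)
     {
       if (*loc != NULL_RTX && REG_P (*loc))
         ++*(int *) data;
       return 0;
     }

     static int
     count_regs (rtx x)
     {
       int n = 0;
       for_each_rtx (&x, count_regs_1, &n);
       return n;
     }
*/
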
/* Data structure that holds the internal state communicated between
   for_each_inc_dec, for_each_inc_dec_find_mem and
   for_each_inc_dec_find_inc_dec.  */

struct for_each_inc_dec_ops
{
  /* The function to be called for each autoinc operation found.  */
  for_each_inc_dec_fn fn;
  /* The opaque argument to be passed to it.  */
  void *arg;
  /* The MEM we're visiting, if any.  */
  rtx mem;
};

static int for_each_inc_dec_find_mem (rtx *r, void *d);

/* Find PRE/POST-INC/DEC/MODIFY operations within *R, extract the
   operands of the equivalent add insn and pass the result to the
   operator specified by *D.  */

static int
for_each_inc_dec_find_inc_dec (rtx *r, void *d)
{
  rtx x = *r;
  struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *)d;

  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
        int size = GET_MODE_SIZE (GET_MODE (data->mem));
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (size, GET_MODE (r1));
        return data->fn (data->mem, x, r1, r1, c, data->arg);
      }

    case PRE_DEC:
    case POST_DEC:
      {
        int size = GET_MODE_SIZE (GET_MODE (data->mem));
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (-size, GET_MODE (r1));
        return data->fn (data->mem, x, r1, r1, c, data->arg);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
        rtx r1 = XEXP (x, 0);
        rtx add = XEXP (x, 1);
        return data->fn (data->mem, x, r1, add, NULL, data->arg);
      }

    case MEM:
      {
        rtx save = data->mem;
        int ret = for_each_inc_dec_find_mem (r, d);
        data->mem = save;
        return ret;
      }

    default:
      return 0;
    }
}

/* If *R is a MEM, find PRE/POST-INC/DEC/MODIFY operations within its
   address, extract the operands of the equivalent add insn and pass
   the result to the operator specified by *D.  */

static int
for_each_inc_dec_find_mem (rtx *r, void *d)
{
  rtx x = *r;
  if (x != NULL_RTX && MEM_P (x))
    {
      struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *) d;
      int result;

      data->mem = x;

      result = for_each_rtx (&XEXP (x, 0), for_each_inc_dec_find_inc_dec,
                             data);
      if (result)
        return result;

      return -1;
    }
  return 0;
}

/* Traverse *X looking for MEMs, and for autoinc operations within
   them.  For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and ARG.  FN is to return -1 to skip looking for other
   autoinc operations within the visited operation, 0 to continue the
   traversal, or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx *x,
                  for_each_inc_dec_fn fn,
                  void *arg)
{
  struct for_each_inc_dec_ops data;

  data.fn = fn;
  data.arg = arg;
  data.mem = NULL;

  return for_each_rtx (x, for_each_inc_dec_find_mem, &data);
}

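/* Usage sketch (illustrative only): a callback conforming to
   for_each_inc_dec_fn that merely counts the autoinc operations
   found; count_autoinc and the insn variable are hypothetical.

     static int
     count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                    rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                    rtx srcoff ATTRIBUTE_UNUSED, void *arg)
     {
       ++*(int *) arg;
       return 0;
     }

     int n = 0;
     for_each_inc_dec (&PATTERN (insn), count_autoinc, &n);
*/
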
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if ((tem = regno_use_in (regno, XEXP (x, i))))
            return tem;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
            return tem;
    }

  return NULL_RTX;
}

/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The higher
   the value, the stronger the preference for being the first operand.
   We use negative values to indicate a preference for the first operand
   and positive values for the second operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always come the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -7;
  if (code == CONST_FIXED)
    return -7;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
        return -6;
      if (code == CONST_DOUBLE)
        return -5;
      if (code == CONST_FIXED)
        return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
        return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
         of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
          || (MEM_P (op) && MEM_POINTER (op)))
        return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
         This helps to make things linear.  In particular,
         (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
         operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
         is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
        return 1;

    default:
      return 0;
    }
}

/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

int
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
          < commutative_operand_precedence (y));
}

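/* Usage sketch (illustrative only): callers normally canonicalize a
   commutative pair before building an expression, e.g.

     if (swap_commutative_operands_p (op0, op1))
       {
         rtx tem = op0;
         op0 = op1;
         op1 = tem;
       }

   so that (plus (const_int 4) (reg)) becomes (plus (reg) (const_int 4));
   constants have the most negative precedence and thus always end up
   as the second operand.  */
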
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */

int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
        return 1;
    default:
      break;
    }
  return 0;
}

/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */

int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (in, i) - 1; j >= 0; j--)
          if (loc == &XVECEXP (in, i, j)
              || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
            return 1;
    }
  return 0;
}

/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (enum machine_mode outer_mode,
              enum machine_mode inner_mode,
              unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
                   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
                  && (subreg_byte % UNITS_PER_WORD
                      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}

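/* Worked example (illustrative): on a little-endian 32-bit target
   (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN == 0, UNITS_PER_WORD == 4),
   (subreg:SI (reg:DI x) 4) has SUBREG_BYTE 4, giving word 1 and
   byte 0, so subreg_lsb_1 returns 1 * BITS_PER_WORD + 0 == 32:
   the subreg starts at bit 32 of the DImode value.  */
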
/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
                       SUBREG_BYTE (x));
}

/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.  */

void
subreg_get_info (unsigned int xregno, enum machine_mode xmode,
                 unsigned int offset, enum machine_mode ymode,
                 struct subreg_info *info)
{
  int nregs_xmode, nregs_ymode;
  int mode_multiple, nregs_multiple;
  int offset_adj, y_offset, y_offset_adj;
  int regsize_xmode, regsize_ymode;
  bool rknown;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  rknown = false;

  /* If there are holes in a non-scalar mode in registers, we expect
     that it is made up of its units concatenated together.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      enum machine_mode xmode_unit;

      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      if (GET_MODE_INNER (xmode) == VOIDmode)
        xmode_unit = xmode;
      else
        xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
                  == (GET_MODE_NUNITS (xmode)
                      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs[xregno][xmode]
                  == (hard_regno_nregs[xregno][xmode_unit]
                      * GET_MODE_NUNITS (xmode)));

      /* You can only ask for a SUBREG of a value with holes in the middle
         if you don't cross the holes.  (Such a SUBREG should be done by
         picking a different register class, or doing it in memory if
         necessary.)  An example of a value with holes is XCmode on 32-bit
         x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
         3 for each part, but in memory it's two 128-bit parts.
         Padding is assumed to be at the end (not necessarily the 'high part')
         of each unit.  */
      if ((offset / GET_MODE_SIZE (xmode_unit) + 1
           < GET_MODE_NUNITS (xmode))
          && (offset / GET_MODE_SIZE (xmode_unit)
              != ((offset + GET_MODE_SIZE (ymode) - 1)
                  / GET_MODE_SIZE (xmode_unit))))
        {
          info->representable_p = false;
          rknown = true;
        }
    }
  else
    nregs_xmode = hard_regno_nregs[xregno][xmode];

  nregs_ymode = hard_regno_nregs[xregno][ymode];

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown
      && offset == 0
      && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
         actual hard registers than the original register, we must
         return a negative offset so that we find the proper highpart
         of the register.  */
      if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
          ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
        info->offset = nregs_xmode - nregs_ymode;
      else
        info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
      && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
    {
      regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
      regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
      if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
        {
          info->representable_p = false;
          info->nregs
            = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
          info->offset = offset / regsize_xmode;
          rknown = true;
        }
      if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
        {
          info->representable_p = false;
          info->nregs
            = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
          info->offset = offset / regsize_xmode;
          rknown = true;
        }
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
    {
      info->representable_p = true;
      rknown = true;

      if (offset == 0 || nregs_xmode == nregs_ymode)
        {
          info->offset = 0;
          info->nregs = nregs_ymode;
          return;
        }
    }

  /* This should always pass, otherwise we don't know how to verify
     the constraint.  These conditions may be relaxed but
     subreg_regno_offset would need to be redesigned.  */
  gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);

  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
      && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
    {
      HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
      HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
      HOST_WIDE_INT off_low = offset & (ysize - 1);
      HOST_WIDE_INT off_high = offset & ~(ysize - 1);
      offset = (xsize - ysize - off_high) | off_low;
    }
  /* The XMODE value can be seen as a vector of NREGS_XMODE
     values.  The subreg must represent a lowpart of given field.
     Compute what field it is.  */
  offset_adj = offset;
  offset_adj -= subreg_lowpart_offset (ymode,
                                       mode_for_size (GET_MODE_BITSIZE (xmode)
                                                      / nregs_xmode,
                                                      MODE_INT, 0));

  /* Size of ymode must not be greater than the size of xmode.  */
  mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
  gcc_assert (mode_multiple != 0);

  y_offset = offset / GET_MODE_SIZE (ymode);
  y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
  nregs_multiple = nregs_xmode / nregs_ymode;

  gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((mode_multiple % nregs_multiple) == 0);

  if (!rknown)
    {
      info->representable_p
        = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
      rknown = true;
    }
  info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
  info->nregs = nregs_ymode;
}

/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */

unsigned int
subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
                     unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}

/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */

bool
subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
                               unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}

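/* Worked example (illustrative, with made-up register numbers): on a
   32-bit little-endian target where DImode occupies two consecutive
   word-sized hard registers with no padding, (subreg:SI (reg:DI 10) 4)
   selects the high word: nregs_xmode == 2, nregs_ymode == 1, the
   offset is representable, and subreg_regno_offset (10, DImode, 4,
   SImode) yields 1, so the subreg resolves to hard register 11.  */
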
/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
                       unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

#ifdef CANNOT_CHANGE_MODE_CLASS
  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode))
    return -1;
#endif

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!HARD_REGNO_MODE_OK (yregno, ymode)
      && HARD_REGNO_MODE_OK (xregno, xmode))
    return -1;

  return (int) yregno;
}

/* Return the final regno that a subreg expression refers to.  */

unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
                                     GET_MODE (subreg),
                                     SUBREG_BYTE (x),
                                     GET_MODE (x));
  return ret;
}

/* Return the number of registers that a subreg expression refers
   to.  */

unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}

/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
                   &info);
  return info.nregs;
}

struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */

static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}

/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */

rtx
find_first_parameter_load (rtx call_insn, rtx boundary)
{
  struct parms_set_data parm;
  rtx p, before, first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
        && REG_P (XEXP (XEXP (p, 0), 0)))
      {
        gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

        /* We only care about registers which can hold function
           arguments.  */
        if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
          continue;

        SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
        parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
         another.  Stop in that case.  */
      if (CALL_P (before))
        break;

      /* Our caller needs either ensure that we will find all sets
         (in case code has not been optimized yet), or take care
         for possible labels in a way by setting boundary to preceding
         CODE_LABEL.  */
      if (LABEL_P (before))
        {
          gcc_assert (before == boundary);
          break;
        }

      if (INSN_P (before))
        {
          int nregs_old = parm.nregs;
          note_stores (PATTERN (before), parms_set, &parm);
          /* If we found something that did not set a parameter reg,
             we're done.  Do not keep going, as that might result
             in hoisting an insn before the setting of a pseudo
             that is used by the hoisted insn.  */
          if (nregs_old != parm.nregs)
            first_set = before;
          else
            break;
        }
    }
  return first_set;
}

/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const_rtx insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
          && fixed_regs[REGNO (SET_DEST (set))]
          && general_operand (SET_SRC (set), VOIDmode))
        return true;
      if (REG_P (SET_SRC (set))
          && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
          && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
        return true;
      /* There may be a stack pop just after the call and before the store
         of the return register.  Search for the actual store when deciding
         if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
        {
          /* This CONST_CAST is okay because next_nonnote_insn just
             returns its argument and we assign it to a const_rtx
             variable.  */
          const_rtx i2 = next_nonnote_insn (CONST_CAST_RTX(insn));
          if (i2 && keep_with_call_p (i2))
            return true;
        }
    }
  return false;
}

/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &tmp))
    {
      rtvec vec = XVEC (PATTERN (tmp),
                        GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
        if (XEXP (RTVEC_ELT (vec, i), 0) == label)
          return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}

/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
  if (factor == 0)
    factor = 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
         number of units (translated from digits) when using
         schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    case SET:
      /* A SET doesn't have a mode, so let's look at the SET_DEST to get
         the mode for the factor.  */
      factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      /* Pass through.  */
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
         the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
        return COSTS_N_INSNS (2 + factor);
      break;

    default:
      if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
        return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        total += rtx_cost (XVECEXP (x, i, j), code, i, speed);

  return total;
}

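/* Usage sketch (illustrative only): comparing two candidate source
   expressions by cost, in the style of the cse/combine callers.  SET
   as OUTER_CODE with operand number 1 means "X appears as the source
   of a set"; new_src and old_src are hypothetical.

     bool speed = optimize_insn_for_speed_p ();
     if (rtx_cost (new_src, SET, 1, speed)
         < rtx_cost (old_src, SET, 1, speed))
       ...prefer new_src...
*/
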
/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
                   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, outer, opno, true);
  c->size = rtx_cost (x, outer, opno, false);
}

/* Return cost of address expression X.
   Expect that X is properly formed address reference.

   SPEED parameter specify whether costs optimized for speed or size should
   be returned.  */

int
address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}

/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, enum machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, MEM, 0, speed);
}

unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, enum machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

unsigned int
num_sign_bit_copies (const_rtx x, enum machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}

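/* Worked example (illustrative): for X = (zero_extend:SI (reg:QI r)),
   nonzero_bits (X, SImode) is at most 0xff, since the 24 high-order
   bits are known to be zero.  For X = (sign_extend:SI (reg:QI r)),
   num_sign_bit_copies (X, SImode) is at least 25, since bits 7..31
   all equal the QImode sign bit.  */
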
/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
                     enum machine_mode known_mode,
                     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return nonzero_bits1 (x, mode, x1, mode,
                              cached_nonzero_bits (x1, mode, known_x,
                                                   known_mode, known_ret));

      if (ARITHMETIC_P (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}

/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior

/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
   an arithmetic operation, we can do better.  */

static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
               enum machine_mode known_mode,
               unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  enum machine_mode inner_mode;
  unsigned int mode_width = GET_MODE_PRECISION (mode);

  /* For floating-point and vector values, assume all bits are needed.  */
  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return nonzero;

  /* If X is wider than MODE, use its mode instead.  */
  if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
    {
      mode = GET_MODE (x);
      nonzero = GET_MODE_MASK (mode);
      mode_width = GET_MODE_PRECISION (mode);
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

#ifndef WORD_REGISTER_OPERATIONS
  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
      && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
      && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
      && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
    {
      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
                                      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }
#endif

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
         all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
          && REG_POINTER (x))
        nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
         pointer-to-integer casts, so we can't trust it except for
         things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
           || x == frame_pointer_rtx
           || x == arg_pointer_rtx)
          && REGNO_POINTER_ALIGN (REGNO (x)))
        {
          unsigned HOST_WIDE_INT alignment
            = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
          /* If PUSH_ROUNDING is defined, it is possible for the
             stack to be momentarily aligned only to that amount,
             so we pick the least alignment.  */
          if (x == stack_pointer_rtx && PUSH_ARGS)
            alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
                             alignment);
#endif

          nonzero &= ~(alignment - 1);
        }

      {
        unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
        rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
                                                  known_mode, known_ret,
                                                  &nonzero_for_hook);

        if (new_rtx)
          nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
                                                   known_mode, known_ret);

        return nonzero_for_hook;
      }

    case CONST_INT:
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
      /* If X is negative in MODE, sign-extend the value.  */
      if (INTVAL (x) > 0
          && mode_width < BITS_PER_WORD
          && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
             != 0)
        return UINTVAL (x) | ((unsigned HOST_WIDE_INT) (-1) << mode_width);
#endif

      return UINTVAL (x);

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* In many, if not most, RISC machines, reading a byte from memory
         zeros the rest of the register.  Noticing that fact saves a lot
         of extra zero-extends.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
        nonzero &= GET_MODE_MASK (GET_MODE (x));
#endif
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
         Code here used to clear bits outside the mode of X, but that is
         now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
         operation in, and not the actual operation mode.  We can wind
         up with (subreg:DI (gt:V4HI x y)), and we don't have anything
         that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
          && mode_width <= HOST_BITS_PER_WIDE_INT)
        nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_PRECISION (GET_MODE (x)))
        nonzero = 1;
#endif

      if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
        nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_PRECISION (GET_MODE (x)))
        nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
                                       known_x, known_mode, known_ret)
                  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
         Otherwise, show all the bits in the outer mode but not the inner
         may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        {
          inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
          if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
            inner_nz |= (GET_MODE_MASK (mode)
                         & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
        }

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret)
                 & cached_nonzero_bits (XEXP (x, 1), mode,
                                        known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
        unsigned HOST_WIDE_INT nonzero0
          = cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero0) != nonzero)
          nonzero &= nonzero0
                     | cached_nonzero_bits (XEXP (x, 1), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
         high- and low-order zero bits of these operations.  We start by
         computing the width (position of the highest-order nonzero bit)
         and the number of low-order zero bits for each value.  */
      {
        unsigned HOST_WIDE_INT nz0
          = cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);
        unsigned HOST_WIDE_INT nz1
          = cached_nonzero_bits (XEXP (x, 1), mode,
                                 known_x, known_mode, known_ret);
        int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
        int width0 = floor_log2 (nz0) + 1;
        int width1 = floor_log2 (nz1) + 1;
        int low0 = floor_log2 (nz0 & -nz0);
        int low1 = floor_log2 (nz1 & -nz1);
        unsigned HOST_WIDE_INT op0_maybe_minusp
          = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
        unsigned HOST_WIDE_INT op1_maybe_minusp
          = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
        unsigned int result_width = mode_width;
        int result_low = 0;

        switch (code)
          {
          case PLUS:
            result_width = MAX (width0, width1) + 1;
            result_low = MIN (low0, low1);
            break;
          case MINUS:
            result_low = MIN (low0, low1);
            break;
          case MULT:
            result_width = width0 + width1;
            result_low = low0 + low1;
            break;
          case DIV:
            if (!op0_maybe_minusp && !op1_maybe_minusp)
              result_width = width0;
            break;
          case UDIV:
            result_width = width0;
            break;
          case MOD:
            if (!op0_maybe_minusp && !op1_maybe_minusp)
              result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          case UMOD:
            result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          default:
            gcc_unreachable ();
          }

        if (result_width < mode_width)
          nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;

        if (result_low > 0)
          nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
        nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
         been zero-extended, we know that at least the high-order bits
         are zero, though others might be too.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
        nonzero = GET_MODE_MASK (GET_MODE (x))
                  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
                                         known_x, known_mode, known_ret);

      inner_mode = GET_MODE (SUBREG_REG (x));
      /* If the inner mode is a single word for both the host and target
         machines, we can compute this from which bits of the inner
         object might be nonzero.  */
      if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
          && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
        {
          nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
                                          known_x, known_mode, known_ret);

#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
          /* If this is a typical RISC machine, we only have to worry
             about the way loads are extended.  */
          if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
               ? val_signbit_known_set_p (inner_mode, nonzero)
               : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
              || !MEM_P (SUBREG_REG (x)))
#endif
            {
              /* On many CISC machines, accessing an object in a wider mode
                 causes the high-order bits to become undefined.  So they are
                 not known to be zero.  */
              if (GET_MODE_PRECISION (GET_MODE (x))
                  > GET_MODE_PRECISION (inner_mode))
                nonzero |= (GET_MODE_MASK (GET_MODE (x))
                            & ~GET_MODE_MASK (inner_mode));
            }
        }
      break;

    case ASHIFTRT:
    case LSHIFTRT:
    case ASHIFT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
         that aren't in GET_MODE (x) are always significant.  The rest of the
         nonzero bits are those that are significant in the operand of
         the shift when shifted the appropriate number of bits.  This
         shows that high-order bits are cleared by the right shift and
         low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
        {
          enum machine_mode inner_mode = GET_MODE (x);
          unsigned int width = GET_MODE_PRECISION (inner_mode);
          int count = INTVAL (XEXP (x, 1));
          unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
          unsigned HOST_WIDE_INT op_nonzero
            = cached_nonzero_bits (XEXP (x, 0), mode,
                                   known_x, known_mode, known_ret);
          unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
          unsigned HOST_WIDE_INT outer = 0;

          if (mode_width > width)
            outer = (op_nonzero & nonzero & ~mode_mask);

          if (code == LSHIFTRT)
            inner >>= count;
          else if (code == ASHIFTRT)
            {
              inner >>= count;

              /* If the sign bit may have been nonzero before the shift, we
                 need to mark all the places it could have been copied to
                 by the shift as possibly nonzero.  */
              if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
                inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
                         << (width - count);
            }
          else if (code == ASHIFT)
            inner <<= count;
          else
            inner = ((inner << (count % width)
                      | (inner >> (width - (count % width)))) & mode_mask);

          nonzero &= (outer | inner);
        }
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero
          |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero
          |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      break;

    case IF_THEN_ELSE:
      {
        unsigned HOST_WIDE_INT nonzero_true
          = cached_nonzero_bits (XEXP (x, 1), mode,
                                 known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero_true) != nonzero)
          nonzero &= nonzero_true
                     | cached_nonzero_bits (XEXP (x, 2), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}

/* See the macro definition above.  */
#undef cached_num_sign_bit_copies

/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode, const_rtx known_x,
                            enum machine_mode known_mode,
                            unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return
          num_sign_bit_copies1 (x, mode, x1, mode,
                                cached_num_sign_bit_copies (x1, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      if (ARITHMETIC_P (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}

4425 /* Return the number of bits at the high-order end of X that are known to
4426 be equal to the sign bit. X will be used in mode MODE; if MODE is
4427 VOIDmode, X will be used in its own mode. The returned value will always
4428 be between 1 and the number of bits in MODE. */
4431 num_sign_bit_copies1 (const_rtx x
, enum machine_mode mode
, const_rtx known_x
,
4432 enum machine_mode known_mode
,
4433 unsigned int known_ret
)
4435 enum rtx_code code
= GET_CODE (x
);
4436 unsigned int bitwidth
= GET_MODE_PRECISION (mode
);
4437 int num0
, num1
, result
;
4438 unsigned HOST_WIDE_INT nonzero
;
4440 /* If we weren't given a mode, use the mode of X. If the mode is still
4441 VOIDmode, we don't know anything. Likewise if one of the modes is
4444 if (mode
== VOIDmode
)
4445 mode
= GET_MODE (x
);
4447 if (mode
== VOIDmode
|| FLOAT_MODE_P (mode
) || FLOAT_MODE_P (GET_MODE (x
))
4448 || VECTOR_MODE_P (GET_MODE (x
)) || VECTOR_MODE_P (mode
))
4451 /* For a smaller object, just ignore the high bits. */
4452 if (bitwidth
< GET_MODE_PRECISION (GET_MODE (x
)))
4454 num0
= cached_num_sign_bit_copies (x
, GET_MODE (x
),
4455 known_x
, known_mode
, known_ret
);
4457 num0
- (int) (GET_MODE_PRECISION (GET_MODE (x
)) - bitwidth
));
4460 if (GET_MODE (x
) != VOIDmode
&& bitwidth
> GET_MODE_PRECISION (GET_MODE (x
)))
4462 #ifndef WORD_REGISTER_OPERATIONS
4463 /* If this machine does not do all register operations on the entire
4464 register and MODE is wider than the mode of X, we can say nothing
4465 at all about the high-order bits. */
4468 /* Likewise on machines that do, if the mode of the object is smaller
4469 than a word and loads of that size don't sign extend, we can say
4470 nothing about the high order bits. */
4471 if (GET_MODE_PRECISION (GET_MODE (x
)) < BITS_PER_WORD
4472 #ifdef LOAD_EXTEND_OP
4473 && LOAD_EXTEND_OP (GET_MODE (x
)) != SIGN_EXTEND
4484 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4485 /* If pointers extend signed and this is a pointer in Pmode, say that
4486 all the bits above ptr_mode are known to be sign bit copies. */
4487 /* As we do not know which address space the pointer is referring to,
4488 we can do this only if the target does not support different pointer
4489 or address modes depending on the address space. */
4490 if (target_default_pointer_address_modes_p ()
4491 && ! POINTERS_EXTEND_UNSIGNED
&& GET_MODE (x
) == Pmode
4492 && mode
== Pmode
&& REG_POINTER (x
))
4493 return GET_MODE_PRECISION (Pmode
) - GET_MODE_PRECISION (ptr_mode
) + 1;
4497 unsigned int copies_for_hook
= 1, copies
= 1;
4498 rtx new_rtx
= rtl_hooks
.reg_num_sign_bit_copies (x
, mode
, known_x
,
4499 known_mode
, known_ret
,
4503 copies
= cached_num_sign_bit_copies (new_rtx
, mode
, known_x
,
4504 known_mode
, known_ret
);
4506 if (copies
> 1 || copies_for_hook
> 1)
4507 return MAX (copies
, copies_for_hook
);
4509 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4514 #ifdef LOAD_EXTEND_OP
4515 /* Some RISC machines sign-extend all loads of smaller than a word. */
4516 if (LOAD_EXTEND_OP (GET_MODE (x
)) == SIGN_EXTEND
)
4517 return MAX (1, ((int) bitwidth
4518 - (int) GET_MODE_PRECISION (GET_MODE (x
)) + 1));
4523 /* If the constant is negative, take its 1's complement and remask.
4524 Then see how many zero bits we have. */
4525 nonzero
= UINTVAL (x
) & GET_MODE_MASK (mode
);
4526 if (bitwidth
<= HOST_BITS_PER_WIDE_INT
4527 && (nonzero
& ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4528 nonzero
= (~nonzero
) & GET_MODE_MASK (mode
);
4530 return (nonzero
== 0 ? bitwidth
: bitwidth
- floor_log2 (nonzero
) - 1);
4533 /* If this is a SUBREG for a promoted object that is sign-extended
4534 and we are looking at it in a wider mode, we know that at least the
4535 high-order bits are known to be sign bit copies. */
4537 if (SUBREG_PROMOTED_VAR_P (x
) && ! SUBREG_PROMOTED_UNSIGNED_P (x
))
4539 num0
= cached_num_sign_bit_copies (SUBREG_REG (x
), mode
,
4540 known_x
, known_mode
, known_ret
);
4541 return MAX ((int) bitwidth
4542 - (int) GET_MODE_PRECISION (GET_MODE (x
)) + 1,
4546 /* For a smaller object, just ignore the high bits. */
4547 if (bitwidth
<= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x
))))
4549 num0
= cached_num_sign_bit_copies (SUBREG_REG (x
), VOIDmode
,
4550 known_x
, known_mode
, known_ret
);
4551 return MAX (1, (num0
4552 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x
)))
4556 #ifdef WORD_REGISTER_OPERATIONS
4557 #ifdef LOAD_EXTEND_OP
4558 /* For paradoxical SUBREGs on machines where all register operations
4559 affect the entire register, just look inside. Note that we are
4560 passing MODE to the recursive call, so the number of sign bit copies
4561 will remain relative to that mode, not the inner mode. */
4563 /* This works only if loads sign extend. Otherwise, if we get a
4564 reload for the inner part, it may be loaded from the stack, and
4565 then we lose all sign bit copies that existed before the store
4568 if (paradoxical_subreg_p (x
)
4569 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x
))) == SIGN_EXTEND
4570 && MEM_P (SUBREG_REG (x
)))
4571 return cached_num_sign_bit_copies (SUBREG_REG (x
), mode
,
4572 known_x
, known_mode
, known_ret
);
4578 if (CONST_INT_P (XEXP (x
, 1)))
4579 return MAX (1, (int) bitwidth
- INTVAL (XEXP (x
, 1)));
4583 return (bitwidth
- GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)))
4584 + cached_num_sign_bit_copies (XEXP (x
, 0), VOIDmode
,
4585 known_x
, known_mode
, known_ret
));
4588 /* For a smaller object, just ignore the high bits. */
4589 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), VOIDmode
,
4590 known_x
, known_mode
, known_ret
);
4591 return MAX (1, (num0
- (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)))
4595 return cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4596 known_x
, known_mode
, known_ret
);
4598 case ROTATE
: case ROTATERT
:
4599 /* If we are rotating left by a number of bits less than the number
4600 of sign bit copies, we can just subtract that amount from the
4602 if (CONST_INT_P (XEXP (x
, 1))
4603 && INTVAL (XEXP (x
, 1)) >= 0
4604 && INTVAL (XEXP (x
, 1)) < (int) bitwidth
)
4606 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4607 known_x
, known_mode
, known_ret
);
4608 return MAX (1, num0
- (code
== ROTATE
? INTVAL (XEXP (x
, 1))
4609 : (int) bitwidth
- INTVAL (XEXP (x
, 1))));
4614 /* In general, this subtracts one sign bit copy. But if the value
4615 is known to be positive, the number of sign bit copies is the
4616 same as that of the input. Finally, if the input has just one bit
4617 that might be nonzero, all the bits are copies of the sign bit. */
4618 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4619 known_x
, known_mode
, known_ret
);
4620 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4621 return num0
> 1 ? num0
- 1 : 1;
4623 nonzero
= nonzero_bits (XEXP (x
, 0), mode
);
4628 && (((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)) & nonzero
))
4633 case IOR
: case AND
: case XOR
:
4634 case SMIN
: case SMAX
: case UMIN
: case UMAX
:
4635 /* Logical operations will preserve the number of sign-bit copies.
4636 MIN and MAX operations always return one of the operands. */
4637 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4638 known_x
, known_mode
, known_ret
);
4639 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4640 known_x
, known_mode
, known_ret
);
4642 /* If num1 is clearing some of the top bits then regardless of
4643 the other term, we are guaranteed to have at least that many
4644 high-order zero bits. */
4647 && bitwidth
<= HOST_BITS_PER_WIDE_INT
4648 && CONST_INT_P (XEXP (x
, 1))
4649 && (UINTVAL (XEXP (x
, 1))
4650 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) == 0)
4653 /* Similarly for IOR when setting high-order bits. */
4656 && bitwidth
<= HOST_BITS_PER_WIDE_INT
4657 && CONST_INT_P (XEXP (x
, 1))
4658 && (UINTVAL (XEXP (x
, 1))
4659 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4662 return MIN (num0
, num1
);
4664 case PLUS
: case MINUS
:
4665 /* For addition and subtraction, we can have a 1-bit carry. However,
4666 if we are subtracting 1 from a positive number, there will not
4667 be such a carry. Furthermore, if the positive number is known to
4668 be 0 or 1, we know the result is either -1 or 0. */
4670 if (code
== PLUS
&& XEXP (x
, 1) == constm1_rtx
4671 && bitwidth
<= HOST_BITS_PER_WIDE_INT
)
4673 nonzero
= nonzero_bits (XEXP (x
, 0), mode
);
4674 if ((((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)) & nonzero
) == 0)
4675 return (nonzero
== 1 || nonzero
== 0 ? bitwidth
4676 : bitwidth
- floor_log2 (nonzero
) - 1);
4679 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4680 known_x
, known_mode
, known_ret
);
4681 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4682 known_x
, known_mode
, known_ret
);
4683 result
= MAX (1, MIN (num0
, num1
) - 1);
4688 /* The number of bits of the product is the sum of the number of
4689 bits of both terms. However, unless one of the terms if known
4690 to be positive, we must allow for an additional bit since negating
4691 a negative number can remove one sign bit copy. */
4693 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4694 known_x
, known_mode
, known_ret
);
4695 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4696 known_x
, known_mode
, known_ret
);
4698 result
= bitwidth
- (bitwidth
- num0
) - (bitwidth
- num1
);
4700 && (bitwidth
> HOST_BITS_PER_WIDE_INT
4701 || (((nonzero_bits (XEXP (x
, 0), mode
)
4702 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4703 && ((nonzero_bits (XEXP (x
, 1), mode
)
4704 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)))
4708 return MAX (1, result
);
4711 /* The result must be <= the first operand. If the first operand
4712 has the high bit set, we know nothing about the number of sign
4714 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4716 else if ((nonzero_bits (XEXP (x
, 0), mode
)
4717 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4720 return cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4721 known_x
, known_mode
, known_ret
);
4724 /* The result must be <= the second operand. If the second operand
4725 has (or just might have) the high bit set, we know nothing about
4726 the number of sign bit copies. */
4727 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4729 else if ((nonzero_bits (XEXP (x
, 1), mode
)
4730 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4733 return cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4734 known_x
, known_mode
, known_ret
);
4737 /* Similar to unsigned division, except that we have to worry about
4738 the case where the divisor is negative, in which case we have
4740 result
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4741 known_x
, known_mode
, known_ret
);
4743 && (bitwidth
> HOST_BITS_PER_WIDE_INT
4744 || (nonzero_bits (XEXP (x
, 1), mode
)
4745 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0))
4751 result
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4752 known_x
, known_mode
, known_ret
);
4754 && (bitwidth
> HOST_BITS_PER_WIDE_INT
4755 || (nonzero_bits (XEXP (x
, 1), mode
)
4756 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0))
4762 /* Shifts by a constant add to the number of bits equal to the
4764 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4765 known_x
, known_mode
, known_ret
);
4766 if (CONST_INT_P (XEXP (x
, 1))
4767 && INTVAL (XEXP (x
, 1)) > 0
4768 && INTVAL (XEXP (x
, 1)) < GET_MODE_PRECISION (GET_MODE (x
)))
4769 num0
= MIN ((int) bitwidth
, num0
+ INTVAL (XEXP (x
, 1)));
4774 /* Left shifts destroy copies. */
4775 if (!CONST_INT_P (XEXP (x
, 1))
4776 || INTVAL (XEXP (x
, 1)) < 0
4777 || INTVAL (XEXP (x
, 1)) >= (int) bitwidth
4778 || INTVAL (XEXP (x
, 1)) >= GET_MODE_PRECISION (GET_MODE (x
)))
4781 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4782 known_x
, known_mode
, known_ret
);
4783 return MAX (1, num0
- INTVAL (XEXP (x
, 1)));
4786 num0
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4787 known_x
, known_mode
, known_ret
);
4788 num1
= cached_num_sign_bit_copies (XEXP (x
, 2), mode
,
4789 known_x
, known_mode
, known_ret
);
4790 return MIN (num0
, num1
);
4792 case EQ
: case NE
: case GE
: case GT
: case LE
: case LT
:
4793 case UNEQ
: case LTGT
: case UNGE
: case UNGT
: case UNLE
: case UNLT
:
4794 case GEU
: case GTU
: case LEU
: case LTU
:
4795 case UNORDERED
: case ORDERED
:
4796 /* If the constant is negative, take its 1's complement and remask.
4797 Then see how many zero bits we have. */
4798 nonzero
= STORE_FLAG_VALUE
;
4799 if (bitwidth
<= HOST_BITS_PER_WIDE_INT
4800 && (nonzero
& ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4801 nonzero
= (~nonzero
) & GET_MODE_MASK (mode
);
4803 return (nonzero
== 0 ? bitwidth
: bitwidth
- floor_log2 (nonzero
) - 1);
    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
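
/* A usage sketch (illustrative only): by the sign-extension rule of
   num_sign_bit_copies, the extension bits plus the original sign bit are
   all copies, so assuming 8-bit QImode and 32-bit SImode,

     rtx x = gen_rtx_SIGN_EXTEND (SImode, gen_rtx_REG (QImode, 0));
     unsigned int n = num_sign_bit_copies (x, SImode);

   gives n >= 32 - 8 + 1 == 25.  */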
/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (set)
		return 0;
	      set = x;
	    }
	}
      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
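
/* A usage sketch (illustrative only; INSN stands for some insn in the
   current function):

     int cost = insn_rtx_cost (PATTERN (insn), optimize_insn_for_speed_p ());

   A zero return means the pattern was not a (single) SET, so its cost is
   unknown; any recognized SET is reported as at least COSTS_N_INSNS (1).  */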
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonizing it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  enum machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

#ifdef HAVE_cc0
      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}
#endif

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;

      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */

	  if ((GET_CODE (SET_SRC (set)) == COMPARE
	       || (((code == NE
		     || (code == LT
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == LT
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set))))
	      && (((GET_MODE_CLASS (mode) == MODE_CC)
		   == (GET_MODE_CLASS (inner_mode) == MODE_CC))
		  || mode == VOIDmode || inner_mode == VOIDmode))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set))
		   && (((GET_MODE_CLASS (mode) == MODE_CC)
			== (GET_MODE_CLASS (inner_mode) == MODE_CC))
		       || mode == VOIDmode || inner_mode == VOIDmode))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else
	    break;
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((const_val & max_val)
	      != ((unsigned HOST_WIDE_INT) 1
		  << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
	  break;

	default:
	  break;
	}
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
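
/* An illustration of rule (4) above: assuming a 32-bit SImode operand,

     (le (reg:SI 100) (const_int 41))

   is canonicalized to

     (lt (reg:SI 100) (const_int 42)),

   and the max_val tests in the switch above keep the +1/-1 adjustments
   from overflowing the mode.  */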
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}
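
/* A usage sketch (illustrative only; JUMP stands for a conditional jump
   insn in the current function):

     rtx cond = get_condition (jump, NULL, 0, 0);

   With a null EARLIEST the returned comparison, if any, is valid at JUMP
   itself.  */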
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  enum machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
	 mode = GET_MODE_WIDER_MODE (mode))
      {
	enum machine_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode) == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
	  {
	    enum machine_mode wider = GET_MODE_WIDER_MODE (i);

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}
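
/* A hypothetical illustration: on a target where
   TARGET_MODE_REP_EXTENDED (SImode, DImode) is SIGN_EXTEND, with 32-bit
   SImode and 64-bit DImode, the loops above record

     num_sign_bit_copies_in_rep[DImode][SImode] == 32

   i.e. every bit of a DImode value above the low SImode part has to be
   a copy of the sign bit.  */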
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (enum machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
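
/* Continuing the hypothetical SImode/DImode illustration above, a DImode
   X is treated as already truncated to SImode only when
   num_sign_bit_copies (x, DImode) >= 32 + 1, i.e. when the SImode sign
   bit and all 32 bits above it agree.  */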
/* Initialize non_rtx_starting_operands, which is used to speed up
   for_each_rtx.  */
void
init_rtlanal (void)
{
  int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      const char *format = GET_RTX_FORMAT (i);
      const char *first = strpbrk (format, "eEV");
      non_rtx_starting_operands[i] = first ? first - format : -1;
    }

  init_num_sign_bit_copies_in_rep ();
}
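
/* For example, PLUS has format "ee", so non_rtx_starting_operands[PLUS]
   is 0, while CONST_INT (format "w") has no 'e', 'E' or 'V' operand and
   gets -1.  */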
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
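
/* For example, assuming 32-bit SImode:

     low_bitmask_len (SImode, 0x3f) == 6    (0x3f + 1 is a power of 2)
     low_bitmask_len (SImode, 0x30) == -1   (0x31 is not a power of 2)

   since exact_log2 returns -1 unless M + 1 is a power of 2.  */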
/* Return the mode of MEM's address.  */

enum machine_mode
get_address_mode (rtx mem)
{
  enum machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return mode;
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}
/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      REAL_VALUE_TYPE r;
      long l[2];
      REAL_VALUE_FROM_CONST_DOUBLE (r, value);

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
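
/* A usage sketch (illustrative only; VALUE stands for a double-word
   integer or floating-point constant):

     rtx w0, w1;
     split_double (value, &w0, &w1);

   W0 is the word that comes first in target memory: the most significant
   word if WORDS_BIG_ENDIAN, the least significant one otherwise.  */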