/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
23 #include "coretypes.h"
25 #include "diagnostic-core.h"
26 #include "hard-reg-set.h"
28 #include "insn-config.h"
38 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
39 #include "addresses.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
                                                   const_rtx, enum machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
                                             const_rtx, enum machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode, const_rtx,
                                                enum machine_mode, unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
                                          enum machine_mode, unsigned int);
/* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
   -1 if a code has no such operand.  */
static int non_rtx_starting_operands[NUM_RTX_CODE];

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      /* Still using the on-stack array; move to the heap vector once
         the local storage is exhausted.  */
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }
      gcc_checking_assert (i == LOCAL_ELEMS);
      vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  const char *format = GET_RTX_FORMAT (GET_CODE (x));
  size_t orig_end = end;
  for (int i = 0; format[i]; ++i)
    if (format[i] == 'e')
      {
        value_type subx = T::get_value (x->u.fld[i].rt_rtx);
        if (__builtin_expect (end < LOCAL_ELEMS, true))
          base[end++] = subx;
        else
          base = add_single_to_queue (array, base, end++, subx);
      }
    else if (format[i] == 'E')
      {
        int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
        rtx *vec = x->u.fld[i].rt_rtvec->elem;
        if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
          for (int j = 0; j < length; j++)
            base[end++] = T::get_value (vec[j]);
        else
          for (int j = 0; j < length; j++)
            base = add_single_to_queue (array, base, end++,
                                        T::get_value (vec[j]));
      }
  return end - orig_end;
}
template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
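
/* Illustrative sketch (not part of the original file): the iterators
   instantiated above are normally driven through the FOR_EACH_SUBRTX
   family of macros.  The helper below is hypothetical and only
   demonstrates the idiom; it walks every subrtx of X, including X
   itself, and counts the REGs it finds.  */

static int
count_regs_example (const_rtx x)
{
  int count = 0;
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (REG_P (*iter))
      count++;
  return count;
}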
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       enum machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx)
        {
          HOST_WIDE_INT adj_offset = offset - STARTING_FRAME_OFFSET;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (FRAME_GROWS_DOWNWARD)
            {
              if (adj_offset < frame_offset || adj_offset + size - 1 >= 0)
                return 1;
            }
          else
            {
              if (adj_offset < 0 || adj_offset + size - 1 >= frame_offset)
                return 1;
            }
          return 0;
        }
      /* ??? Need to add a similar guard for nonsensical offsets.  */
      if (x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
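
/* Illustrative sketch (not part of the original file): how a caller
   typically uses split_const.  The helper name is hypothetical; the
   point is that split_const never fails, it simply returns X itself
   with a zero offset when no (const (plus ... (const_int ...)))
   wrapper is present.  */

static void
split_const_usage_example (rtx addr)
{
  rtx base, offset;
  split_const (addr, &base, &offset);
  /* BASE now holds the symbolic part and OFFSET is always a CONST_INT,
     possibly const0_rtx.  */
  gcc_assert (CONST_INT_P (offset));
}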
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == XEXP (in, 0);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    CASE_CONST_ANY:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const_rtx beg, const_rtx end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Internals of reg_set_between_p.  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const_rtx start, const_rtx end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
/* Add all hard registers in X to *PSET.  */
void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *) data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}
/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const_rtx insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In the common case
                 only a single set is present in the pattern, so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach a set the first time, we just expect it is
                 the single set we are looking for; only when more sets
                 are found in the insn do we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}
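
/* Illustrative sketch (not part of the original file): single_set_2 is
   the slow path behind the single_set macro in rtl.h.  A pass that wants
   the one meaningful SET of an insn typically writes something like the
   hypothetical helper below.  */

static bool
insn_copies_reg_example (rtx_insn *insn)
{
  rtx set = single_set (insn);
  return set != NULL_RTX
         && REG_P (SET_DEST (set))
         && REG_P (SET_SRC (set));
}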
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const_rtx insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return the last thing that X was assigned from before *PINSN.  If VALID_TO
   is not NULL_RTX then verify that the object is not modified up to VALID_TO.
   If the object was modified, if we hit a partial assignment to X, or hit a
   CODE_LABEL first, return X.  If we found an assignment, update *PINSN to
   point to it.  ALLOW_HWREG is set to 1 if hardware registers are allowed to
   be the src.  */

rtx
find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
{
  rtx p;

  for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
       p = PREV_INSN (p))
    if (INSN_P (p))
      {
        rtx set = single_set (p);
        rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);

        if (set && rtx_equal_p (x, SET_DEST (set)))
          {
            rtx src = SET_SRC (set);

            if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
              src = XEXP (note, 0);

            if ((valid_to == NULL_RTX
                 || ! modified_between_p (src, PREV_INSN (p), valid_to))
                /* Reject hard registers because we don't usually want
                   to use them; we'd rather use a pseudo.  */
                && (! (REG_P (src)
                       && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
              {
                *pinsn = p;
                return src;
              }
          }

        /* If set in non-simple way, we don't have a value.  */
        if (reg_set_p (x, p))
          break;
      }

  return x;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
           || x_regno == ARG_POINTER_REGNUM
#endif
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return 1;
        }
    }
  return 0;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */

      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
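
/* Illustrative sketch (not part of the original file): a typical
   note_stores callback.  record_hard_reg_sets above is the real in-tree
   example; the hypothetical callback below merely flags whether any
   store happened, passing the flag through the opaque DATA pointer.

   Usage would look like:

     bool saw_store = false;
     note_stores (PATTERN (insn), note_any_store_example, &saw_store);  */

static void
note_any_store_example (rtx dest ATTRIBUTE_UNUSED,
                        const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  *(bool *) data = true;
}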
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
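
/* Illustrative sketch (not part of the original file): reg-note lookups
   like the one above are usually one-liners at call sites.  The
   hypothetical helper below returns the value an insn is known to
   compute, mirroring what find_constant_src further down does for
   constants.  */

static rtx
known_equal_value_example (const_rtx insn)
{
  rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
  return note ? XEXP (note, 0) : NULL_RTX;
}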
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const_rtx insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_HARD_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_HARD_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}

/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((enum machine_mode) kind,
                                       datum, REG_NOTES (insn));
}
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (as_a <rtx_insn *> (insn));
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        *loc = XEXP (*loc, 1);
      else
        loc = &XEXP (*loc, 1);
    }
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

int
in_expr_list_p (const_rtx listp, const_rtx node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return 1;

  return 0;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx prev = NULL_RTX;

  while (temp)
    {
      if (node == temp->element ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state changes, and thus no
   instructions or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
    case ASM_INPUT:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case VAR_LOCATION:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (side_effects_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero value means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

static int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
    case ASM_INPUT:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
          && MEM_VOLATILE_P (x)
          && XEXP (x, 0) == stack_pointer_rtx)
        return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
             reference; moving it out of context such as when moving code
             when optimizing, might cause its address to become invalid.  */
          code_changed
          || !MEM_NOTRAP_P (x))
        {
          HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
          return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
                                        GET_MODE (x), code_changed);
        }
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (GET_MODE (x)))
        return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
        return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
        return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
         certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
        break;
      /* ??? There is no machine independent way to check for tests that trap
         when COMPARE is used, though many targets do make this distinction.
         For instance, sparc uses CCFPE for compares which generate exceptions
         and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (GET_MODE (x)))
        return 1;
      /* But often the compare has some CC mode, so check operand
         modes as well.  */
      if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
          || HONOR_NANS (GET_MODE (XEXP (x, 1))))
        return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (GET_MODE (x)))
        return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
          || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
        return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
        return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
        return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (may_trap_p_1 (XEXP (x, i), flags))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            if (may_trap_p_1 (XVECEXP (x, i, j), flags))
              return 1;
        }
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
	if (s->d == 1)
	  return *s->ip;
	else
	  return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      ((set (reg:SI) (mem:SI (%fp - 7)))
       ...
       (set (reg:QI) (mem:QI (%fp - 7))))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case SCRATCH:
    case PC:
    case CC0:
    CASE_CONST_ANY:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      return 0;

    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          if (inequality_comparisons_p (XEXP (x, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (inequality_comparisons_p (XVECEXP (x, i, j)))
              return 1;
        }
    }

  return 0;
}

/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.  */

rtx
replace_rtx (rtx x, rtx from, rtx to)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_subreg (GET_MODE (x), new_rtx,
                               GET_MODE (SUBREG_REG (x)),
                               SUBREG_BYTE (x));
          gcc_assert (x);
        }
      else
        SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                        new_rtx, GET_MODE (XEXP (x, 0)));
          gcc_assert (x);
        }
      else
        XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
    }

  return x;
}
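
/* A typical use (shown for illustration only) is substituting a
   known-constant register into a copied pattern:

     rtx pat = copy_rtx (PATTERN (insn));
     pat = replace_rtx (pat, pseudo, GEN_INT (0));

   The copy_rtx call respects the sharing caveat above; PSEUDO names an
   arbitrary REG being replaced.  */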

/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
{
  /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
  rtx x = *loc;
  if (JUMP_TABLE_DATA_P (x))
    {
      x = PATTERN (x);
      rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
        {
          rtx ref = RTVEC_ELT (vec, i);
          if (XEXP (ref, 0) == old_label)
            {
              XEXP (ref, 0) = new_label;
              if (update_label_nuses)
                {
                  ++LABEL_NUSES (new_label);
                  --LABEL_NUSES (old_label);
                }
            }
        }
      return;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by the iterator because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
    JUMP_LABEL (x) = new_label;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (rtx x = *loc)
        {
          if (GET_CODE (x) == SYMBOL_REF
              && CONSTANT_POOL_ADDRESS_P (x))
            {
              rtx c = get_pool_constant (x);
              if (rtx_referenced_p (old_label, c))
                {
                  /* Create a copy of constant C; replace the label inside
                     but do not update LABEL_NUSES because uses in constant
                     pool are not counted.  */
                  rtx new_c = copy_rtx (c);
                  replace_label (&new_c, old_label, new_label, false);

                  /* Add the new constant NEW_C to constant pool and replace
                     the old reference to constant by new reference.  */
                  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
                  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
                }
            }

          if ((GET_CODE (x) == LABEL_REF
               || GET_CODE (x) == INSN_LIST)
              && XEXP (x, 0) == old_label)
            {
              XEXP (x, 0) = new_label;
              if (update_label_nuses)
                {
                  ++LABEL_NUSES (new_label);
                  --LABEL_NUSES (old_label);
                }
            }
        }
    }
}

void
replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
                       bool update_label_nuses)
{
  rtx insn_as_rtx = insn;
  replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
  gcc_checking_assert (insn_as_rtx == insn);
}

/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
        /* Check if a label_ref Y refers to label X.  */
        if (GET_CODE (y) == LABEL_REF && LABEL_P (x) && XEXP (y, 0) == x)
          return true;

        if (rtx_equal_p (x, y))
          return true;

        /* If Y is a reference to pool constant traverse the constant.  */
        if (GET_CODE (y) == SYMBOL_REF
            && CONSTANT_POOL_ADDRESS_P (y))
          iter.substitute (get_pool_constant (y));
      }
  return false;
}

/* If INSN is a tablejump, return true and store the label (before jump table)
   to *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const_rtx insn, rtx *labelp, rtx_jump_table_data **tablep)
{
  rtx label, table;

  if (!JUMP_P (insn))
    return false;

  label = JUMP_LABEL (insn);
  if (label != NULL_RTX && !ANY_RETURN_P (label)
      && (table = NEXT_INSN (label)) != NULL_RTX
      && JUMP_TABLE_DATA_P (table))
    {
      if (labelp)
        *labelp = label;
      if (tablep)
        *tablep = as_a <rtx_jump_table_data *> (table);
      return true;
    }
  return false;
}
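
/* Usage sketch (illustrative only):

     rtx label;
     rtx_jump_table_data *table;
     if (tablejump_p (insn, &label, &table))
       process_cases (GET_NUM_ELEM (table->get_labels ()));

   where process_cases stands for arbitrary caller code.  Either LABELP
   or TABLEP may be NULL when the caller only needs the predicate.  */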

/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
                && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
              || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
          && computed_jump_p_1 (XEXP (x, i)))
        return 1;

      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (computed_jump_p_1 (XVECEXP (x, i, j)))
            return 1;
    }

  return 0;
}

/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const_rtx insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
        return 0;

      if (GET_CODE (pat) == PARALLEL)
        {
          int len = XVECLEN (pat, 0);
          int has_use_labelref = 0;

          for (i = len - 1; i >= 0; i--)
            if (GET_CODE (XVECEXP (pat, 0, i)) == USE
                && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
                    == LABEL_REF))
              {
                has_use_labelref = 1;
                break;
              }

          if (! has_use_labelref)
            for (i = len - 1; i >= 0; i--)
              if (GET_CODE (XVECEXP (pat, 0, i)) == SET
                  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
                  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
                return 1;
        }
      else if (GET_CODE (pat) == SET
               && SET_DEST (pat) == pc_rtx
               && computed_jump_p_1 (SET_SRC (pat)))
        return 1;
    }
  return 0;
}
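
/* For example, (set (pc) (reg:SI 100)) is a computed jump: the target
   address lives in a register.  By contrast, a tablejump of the shape
   (parallel [(set (pc) ...) (use (label_ref ...))]) is rejected by the
   has_use_labelref check above.  */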

/* Optimized loop of for_each_rtx, trying to avoid useless recursive
   calls.  Processes the subexpressions of EXP and passes them to F.  */

static int
for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
{
  int result, i, j;
  const char *format = GET_RTX_FORMAT (GET_CODE (exp));
  rtx *x;

  for (; format[n] != '\0'; n++)
    {
      switch (format[n])
        {
        case 'e':
          /* Call F on X.  */
          x = &XEXP (exp, n);
          result = (*f) (x, data);
          if (result == -1)
            /* Do not traverse sub-expressions.  */
            continue;
          else if (result != 0)
            /* Stop the traversal.  */
            return result;

          if (*x == NULL_RTX)
            /* There are no sub-expressions.  */
            continue;

          i = non_rtx_starting_operands[GET_CODE (*x)];
          if (i >= 0)
            {
              result = for_each_rtx_1 (*x, i, f, data);
              if (result != 0)
                return result;
            }
          break;

        case 'V':
        case 'E':
          if (XVEC (exp, n) == 0)
            continue;
          for (j = 0; j < XVECLEN (exp, n); ++j)
            {
              /* Call F on X.  */
              x = &XVECEXP (exp, n, j);
              result = (*f) (x, data);
              if (result == -1)
                /* Do not traverse sub-expressions.  */
                continue;
              else if (result != 0)
                /* Stop the traversal.  */
                return result;

              if (*x == NULL_RTX)
                /* There are no sub-expressions.  */
                continue;

              i = non_rtx_starting_operands[GET_CODE (*x)];
              if (i >= 0)
                {
                  result = for_each_rtx_1 (*x, i, f, data);
                  if (result != 0)
                    return result;
                }
            }
          break;

        default:
          /* Nothing to do.  */
          break;
        }
    }

  return 0;
}

/* Traverse X via depth-first search, calling F for each
   sub-expression (including X itself).  F is also passed the DATA.
   If F returns -1, do not traverse sub-expressions, but continue
   traversing the rest of the tree.  If F ever returns any other
   nonzero value, stop the traversal, and return the value returned
   by F.  Otherwise, return 0.  This function does not traverse inside
   tree structure that contains RTX_EXPRs, or into sub-expressions
   whose format code is `0' since it is not known whether or not those
   codes are actually RTL.

   This routine is very general, and could (should?) be used to
   implement many of the other routines in this file.  */

int
for_each_rtx (rtx *x, rtx_function f, void *data)
{
  int result;
  int i;

  /* Call F on X.  */
  result = (*f) (x, data);
  if (result == -1)
    /* Do not traverse sub-expressions.  */
    return 0;
  else if (result != 0)
    /* Stop the traversal.  */
    return result;

  if (*x == NULL_RTX)
    /* There are no sub-expressions.  */
    return 0;

  i = non_rtx_starting_operands[GET_CODE (*x)];
  if (i < 0)
    return 0;

  return for_each_rtx_1 (*x, i, f, data);
}

/* Like "for_each_rtx", but for calling on an rtx_insn **.  */

int
for_each_rtx_in_insn (rtx_insn **insn, rtx_function f, void *data)
{
  rtx insn_as_rtx = *insn;
  int result = for_each_rtx (&insn_as_rtx, f, data);
  if (insn_as_rtx != *insn)
    *insn = safe_as_a <rtx_insn *> (insn_as_rtx);
  return result;
}
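
/* A minimal callback for the above, shown for illustration only:
   count the MEMs contained in a pattern.

     static int
     count_mems_1 (rtx *x, void *data)
     {
       if (MEM_P (*x))
         ++*(int *) data;
       return 0;
     }

     int n_mems = 0;
     for_each_rtx (&pat, count_mems_1, &n_mems);

   Returning -1 from the callback would skip the sub-expressions of *X;
   any other nonzero value stops the walk and is propagated back.
   (count_mems_1 is a hypothetical helper, not defined in this file.)  */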

/* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
   the equivalent add insn and pass the result to FN, using DATA as the
   final argument.  */

static int
for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
{
  rtx x = XEXP (mem, 0);
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
        int size = GET_MODE_SIZE (GET_MODE (mem));
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (size, GET_MODE (r1));
        return fn (mem, x, r1, r1, c, data);
      }

    case PRE_DEC:
    case POST_DEC:
      {
        int size = GET_MODE_SIZE (GET_MODE (mem));
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (-size, GET_MODE (r1));
        return fn (mem, x, r1, r1, c, data);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
        rtx r1 = XEXP (x, 0);
        rtx add = XEXP (x, 1);
        return fn (mem, x, r1, add, NULL, data);
      }

    default:
      gcc_unreachable ();
    }
}

/* Traverse *LOC looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
                  for_each_inc_dec_fn fn,
                  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
          && MEM_P (mem)
          && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
        {
          int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
          if (res != 0)
            return res;

          iter.skip_subrtxes ();
        }
    }
  return 0;
}
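
/* A minimal FN for the above, shown for illustration: count the
   autoinc operations in a pattern.

     static int
     count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                    rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                    rtx srcoff ATTRIBUTE_UNUSED, void *arg)
     {
       ++*(int *) arg;
       return 0;
     }

     int count = 0;
     for_each_inc_dec (PATTERN (insn), count_autoinc, &count);

   (count_autoinc is a hypothetical helper; the parameter names are
   arbitrary.)  */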

/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if ((tem = regno_use_in (regno, XEXP (x, i))))
            return tem;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
            return tem;
    }

  return NULL_RTX;
}

/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The higher
   the value, the stronger the preference for being the first operand.
   We use negative values to indicate a preference for the first operand
   and positive values for the second operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always come second.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -8;
  if (code == CONST_WIDE_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -7;
  if (code == CONST_FIXED)
    return -7;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
        return -6;
      if (code == CONST_WIDE_INT)
        return -6;
      if (code == CONST_DOUBLE)
        return -5;
      if (code == CONST_FIXED)
        return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
        return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
         of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
          || (MEM_P (op) && MEM_POINTER (op)))
        return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
         This helps to make things linear.  In particular,
         (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
         operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
         is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
        return 1;

    default:
      return 0;
    }
}

/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
          < commutative_operand_precedence (y));
}
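
/* Canonicalization then reduces to the pattern used throughout the
   compiler (illustrative sketch):

     if (swap_commutative_operands_p (op0, op1))
       {
         rtx tem = op0;
         op0 = op1;
         op1 = tem;
       }

   For instance, a CONST_INT scores -8 and a REG scores -2 (or -1 if
   REG_POINTER), so (plus (const_int 4) (reg)) is swapped into the
   canonical (plus (reg) (const_int 4)).  */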

/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */
int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
        return 1;
      break;
    default:
      break;
    }
  return 0;
}

/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (in, i) - 1; j >= 0; j--)
          if (loc == &XVECEXP (in, i, j)
              || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
            return 1;
    }
  return 0;
}

/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (enum machine_mode outer_mode,
              enum machine_mode inner_mode,
              unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
                   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
                  && (subreg_byte % UNITS_PER_WORD
                      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}

/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
                       SUBREG_BYTE (x));
}
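
/* Worked example (illustrative): for (subreg:SI (reg:DI R) 4),
   SUBREG_BYTE 4 selects the high half on a little-endian target, so
   subreg_lsb returns 32 there; on a fully big-endian target the same
   byte offset selects the low half and the result is 0.  This assumes
   the word and byte endianness flags agree with each other.  */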

/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.  */
void
subreg_get_info (unsigned int xregno, enum machine_mode xmode,
                 unsigned int offset, enum machine_mode ymode,
                 struct subreg_info *info)
{
  int nregs_xmode, nregs_ymode;
  int mode_multiple, nregs_multiple;
  int offset_adj, y_offset, y_offset_adj;
  int regsize_xmode, regsize_ymode;
  bool rknown;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  rknown = false;

  /* If there are holes in a non-scalar mode in registers, we expect
     that it is made up of its units concatenated together.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      enum machine_mode xmode_unit;

      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      if (GET_MODE_INNER (xmode) == VOIDmode)
        xmode_unit = xmode;
      else
        xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
                  == (GET_MODE_NUNITS (xmode)
                      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs[xregno][xmode]
                  == (hard_regno_nregs[xregno][xmode_unit]
                      * GET_MODE_NUNITS (xmode)));

      /* You can only ask for a SUBREG of a value with holes in the middle
         if you don't cross the holes.  (Such a SUBREG should be done by
         picking a different register class, or doing it in memory if
         necessary.)  An example of a value with holes is XCmode on 32-bit
         x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
         3 for each part, but in memory it's two 128-bit parts.
         Padding is assumed to be at the end (not necessarily the 'high part')
         of each unit.  */
      if ((offset / GET_MODE_SIZE (xmode_unit) + 1
           < GET_MODE_NUNITS (xmode))
          && (offset / GET_MODE_SIZE (xmode_unit)
              != ((offset + GET_MODE_SIZE (ymode) - 1)
                  / GET_MODE_SIZE (xmode_unit))))
        {
          info->representable_p = false;
          rknown = true;
        }
    }
  else
    nregs_xmode = hard_regno_nregs[xregno][xmode];

  nregs_ymode = hard_regno_nregs[xregno][ymode];

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown
      && offset == 0
      && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
         actual hard registers than the original register, we must
         return a negative offset so that we find the proper highpart
         of the register.  */
      if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
          ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
        info->offset = nregs_xmode - nregs_ymode;
      else
        info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
      && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
    {
      regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
      regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
      if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
        {
          info->representable_p = false;
          info->nregs
            = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
          info->offset = offset / regsize_xmode;
          return;
        }
      if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
        {
          info->representable_p = false;
          info->nregs
            = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
          info->offset = offset / regsize_xmode;
          return;
        }
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
    {
      info->representable_p = true;
      rknown = true;

      if (offset == 0 || nregs_xmode == nregs_ymode)
        {
          info->offset = 0;
          info->nregs = nregs_ymode;
          return;
        }
    }

  /* This should always pass, otherwise we don't know how to verify
     the constraint.  These conditions may be relaxed but
     subreg_regno_offset would need to be redesigned.  */
  gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);

  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
      && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
    {
      HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
      HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
      HOST_WIDE_INT off_low = offset & (ysize - 1);
      HOST_WIDE_INT off_high = offset & ~(ysize - 1);
      offset = (xsize - ysize - off_high) | off_low;
    }
  /* The XMODE value can be seen as a vector of NREGS_XMODE
     values.  The subreg must represent a lowpart of given field.
     Compute what field it is.  */
  offset_adj = offset;
  offset_adj -= subreg_lowpart_offset (ymode,
                                       mode_for_size (GET_MODE_BITSIZE (xmode)
                                                      / nregs_xmode,
                                                      MODE_INT, 0));

  /* Size of ymode must not be greater than the size of xmode.  */
  mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
  gcc_assert (mode_multiple != 0);

  y_offset = offset / GET_MODE_SIZE (ymode);
  y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
  nregs_multiple = nregs_xmode / nregs_ymode;

  gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((mode_multiple % nregs_multiple) == 0);

  if (!rknown)
    {
      info->representable_p
        = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
      rknown = true;
    }
  info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
  info->nregs = nregs_ymode;
}

/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */
unsigned int
subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
                     unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}

/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */
bool
subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
                               unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
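
/* For example (illustrative, and target-dependent): on a 32-bit
   little-endian target where (reg:DI 0) occupies hard registers 0 and
   1, subreg_regno_offset (0, DImode, 4, SImode) yields 1, i.e. the
   SUBREG at byte offset 4 refers to hard register 1, and the offset is
   representable.  */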

/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
                       unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

#ifdef CANNOT_CHANGE_MODE_CLASS
  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
      /* We can use mode change in LRA for some transformations.  */
      && ! lra_in_progress)
    return -1;
#endif

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM
      /* We should convert hard stack register in LRA if it is
         necessary.  */
      && ! lra_in_progress)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!HARD_REGNO_MODE_OK (yregno, ymode)
      && HARD_REGNO_MODE_OK (xregno, xmode))
    return -1;

  return (int) yregno;
}

/* Return the final regno that a subreg expression refers to.  */
unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
                                     GET_MODE (subreg),
                                     SUBREG_BYTE (x),
                                     GET_MODE (x));
  return ret;
}

/* Return the number of registers that a subreg expression refers
   to.  */
unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}

/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
                   &info);
  return info.nregs;
}

struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */
static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}

/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */
rtx_insn *
find_first_parameter_load (rtx call_insn, rtx boundary)
{
  struct parms_set_data parm;
  rtx p, before, first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
        && REG_P (XEXP (XEXP (p, 0), 0)))
      {
        gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

        /* We only care about registers which can hold function
           arguments.  */
        if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
          continue;

        SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
        parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
         another.  Stop in that case.  */
      if (CALL_P (before))
        break;

      /* Our caller must either ensure that we will find all sets
         (in case code has not been optimized yet), or take care of
         possible labels by setting BOUNDARY to the preceding
         CODE_LABEL.  */
      if (LABEL_P (before))
        {
          gcc_assert (before == boundary);
          break;
        }

      if (INSN_P (before))
        {
          int nregs_old = parm.nregs;
          note_stores (PATTERN (before), parms_set, &parm);
          /* If we found something that did not set a parameter reg,
             we're done.  Do not keep going, as that might result
             in hoisting an insn before the setting of a pseudo
             that is used by the hoisted insn.  */
          if (nregs_old != parm.nregs)
            first_set = before;
          else
            break;
        }
    }
  return safe_as_a <rtx_insn *> (first_set);
}

/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const_rtx insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
          && fixed_regs[REGNO (SET_DEST (set))]
          && general_operand (SET_SRC (set), VOIDmode))
        return true;
      if (REG_P (SET_SRC (set))
          && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
          && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
        return true;
      /* There may be a stack pop just after the call and before the store
         of the return register.  Search for the actual store when deciding
         if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
        {
          /* This CONST_CAST is okay because next_nonnote_insn just
             returns its argument and we assign it to a const_rtx
             variable.  */
          const rtx_insn *i2 = next_nonnote_insn (CONST_CAST_RTX (insn));
          if (i2 && keep_with_call_p (i2))
            return true;
        }
    }
  return false;
}

/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);
  rtx_jump_table_data *table;

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
        if (XEXP (RTVEC_ELT (vec, i), 0) == label)
          return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}

/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
  if (factor == 0)
    factor = 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
         number of units (translated from digits) when using
         schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    case SET:
      /* A SET doesn't have a mode, so let's look at the SET_DEST to get
         the mode for the factor.  */
      factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      /* Pass through.  */
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
         the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
        return COSTS_N_INSNS (2 + factor);
      break;

    default:
      if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
        return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        total += rtx_cost (XVECEXP (x, i, j), code, i, speed);

  return total;
}
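
/* For example (illustrative sketch), to compare the cost of a
   multiplication by 8 against the equivalent left shift on the
   current target:

     int mult_cost  = rtx_cost (gen_rtx_MULT (SImode, reg, GEN_INT (8)),
                                SET, 1, speed);
     int shift_cost = rtx_cost (gen_rtx_ASHIFT (SImode, reg, GEN_INT (3)),
                                SET, 1, speed);

   REG and SPEED stand for a pseudo and the caller's optimize-for-speed
   flag.  */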

/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
                   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, outer, opno, true);
  c->size = rtx_cost (x, outer, opno, false);
}

/* Return cost of address expression X.
   Expect that X is a properly formed address reference.

   The SPEED parameter specifies whether costs optimized for speed or
   size should be returned.  */

int
address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}

/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, enum machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, MEM, 0, speed);
}
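
/* For example (illustrative), the cost of a base-plus-displacement
   address can be queried as

     address_cost (gen_rtx_PLUS (Pmode, base, GEN_INT (4)),
                   SImode, ADDR_SPACE_GENERIC, true);

   where BASE stands for a valid base register.  */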

unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, enum machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

unsigned int
num_sign_bit_copies (const_rtx x, enum machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}
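
/* For example, nonzero_bits on (and:SI X (const_int 255)) can return
   at most 0xff, because the AND case below intersects the masks of
   both operands; and num_sign_bit_copies on (sign_extend:DI (reg:SI))
   is at least 33, because the SIGN_EXTEND case adds the 32 extended
   bits to the one sign bit every value has.  */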

/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
                     enum machine_mode known_mode,
                     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return nonzero_bits1 (x, mode, x1, mode,
                              cached_nonzero_bits (x1, mode, known_x,
                                                   known_mode, known_ret));

      if (ARITHMETIC_P (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}
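
/* The caching matters for expressions such as
   (plus:SI (reg:SI A) (reg:SI A)): the shared operand is evaluated
   once and its value is threaded through KNOWN_X/KNOWN_RET, so a tree
   of depth N with shared children costs O(N) rather than O(2^N)
   evaluations.  */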

/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior

/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
   an arithmetic operation, we can do better.  */

static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
               enum machine_mode known_mode,
               unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  enum machine_mode inner_mode;
  unsigned int mode_width = GET_MODE_PRECISION (mode);

  /* For floating-point and vector values, assume all bits are needed.  */
  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return nonzero;

  /* If X is wider than MODE, use its mode instead.  */
  if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
    {
      mode = GET_MODE (x);
      nonzero = GET_MODE_MASK (mode);
      mode_width = GET_MODE_PRECISION (mode);
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

#ifndef WORD_REGISTER_OPERATIONS
  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
      && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
      && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
      && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
    {
      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
                                      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }
#endif

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
         all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
          && REG_POINTER (x))
        nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
         pointer-to-integer casts, so we can't trust it except for
         things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
           || x == frame_pointer_rtx
           || x == arg_pointer_rtx)
          && REGNO_POINTER_ALIGN (REGNO (x)))
        {
          unsigned HOST_WIDE_INT alignment
            = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
          /* If PUSH_ROUNDING is defined, it is possible for the
             stack to be momentarily aligned only to that amount,
             so we pick the least alignment.  */
          if (x == stack_pointer_rtx && PUSH_ARGS)
            alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
                             alignment);
#endif

          nonzero &= ~(alignment - 1);
        }

      {
        unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
        rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
                                                  known_mode, known_ret,
                                                  &nonzero_for_hook);

        if (new_rtx)
          nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
                                                   known_mode, known_ret);

        return nonzero_for_hook;
      }

    case CONST_INT:
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
      /* If X is negative in MODE, sign-extend the value.  */
      if (INTVAL (x) > 0
          && mode_width < BITS_PER_WORD
          && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
             != 0)
        return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
#endif

      return UINTVAL (x);

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* In many, if not most, RISC machines, reading a byte from memory
         zeros the rest of the register.  Noticing that fact saves a lot
         of extra zero-extends.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
        nonzero &= GET_MODE_MASK (GET_MODE (x));
#endif
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
         Code here used to clear bits outside the mode of X, but that is
         now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
         operation in, and not the actual operation mode.  We can wind
         up with (subreg:DI (gt:V4HI x y)), and we don't have anything
         that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
          && mode_width <= HOST_BITS_PER_WIDE_INT)
        nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_PRECISION (GET_MODE (x)))
        nonzero = 1;
#endif

      if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
        nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_PRECISION (GET_MODE (x)))
        nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
                                       known_x, known_mode, known_ret)
                  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
         Otherwise, show all the bits in the outer mode but not the inner
         may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        {
          inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
          if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
            inner_nz |= (GET_MODE_MASK (mode)
                         & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
        }

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret)
                 & cached_nonzero_bits (XEXP (x, 1), mode,
                                        known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
        unsigned HOST_WIDE_INT nonzero0
          = cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero0) != nonzero)
          nonzero &= nonzero0
                     | cached_nonzero_bits (XEXP (x, 1), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
         high- and low-order zero bits of these operations.  We start by
         computing the width (position of the highest-order nonzero bit)
         and the number of low-order zero bits for each value.  */
      {
        unsigned HOST_WIDE_INT nz0
          = cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);
        unsigned HOST_WIDE_INT nz1
          = cached_nonzero_bits (XEXP (x, 1), mode,
                                 known_x, known_mode, known_ret);
        int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
        int width0 = floor_log2 (nz0) + 1;
        int width1 = floor_log2 (nz1) + 1;
        int low0 = floor_log2 (nz0 & -nz0);
        int low1 = floor_log2 (nz1 & -nz1);
        unsigned HOST_WIDE_INT op0_maybe_minusp
          = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
        unsigned HOST_WIDE_INT op1_maybe_minusp
          = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
        unsigned int result_width = mode_width;
        int result_low = 0;

        switch (code)
          {
          case PLUS:
            result_width = MAX (width0, width1) + 1;
            result_low = MIN (low0, low1);
            break;
          case MINUS:
            result_low = MIN (low0, low1);
            break;
          case MULT:
            result_width = width0 + width1;
            result_low = low0 + low1;
            break;
          case DIV:
            if (width1 == 0)
              break;
            if (!op0_maybe_minusp && !op1_maybe_minusp)
              result_width = width0;
            break;
          case UDIV:
            if (width1 == 0)
              break;
            result_width = width0;
            break;
          case MOD:
            if (width1 == 0)
              break;
            if (!op0_maybe_minusp && !op1_maybe_minusp)
              result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          case UMOD:
            if (width1 == 0)
              break;
            result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          default:
            gcc_unreachable ();
          }

        if (result_width < mode_width)
          nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;

        if (result_low > 0)
          nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
        nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
         been zero-extended, we know that at least the high-order bits
         are zero, though others might be too.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
        nonzero = GET_MODE_MASK (GET_MODE (x))
                  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
                                         known_x, known_mode, known_ret);

      inner_mode = GET_MODE (SUBREG_REG (x));
      /* If the inner mode is a single word for both the host and target
         machines, we can compute this from which bits of the inner
         object might be nonzero.  */
      if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
          && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
        {
          nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
                                          known_x, known_mode, known_ret);

#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
          /* If this is a typical RISC machine, we only have to worry
             about the way loads are extended.  */
          if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
               ? val_signbit_known_set_p (inner_mode, nonzero)
               : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
              || !MEM_P (SUBREG_REG (x)))
#endif
            {
              /* On many CISC machines, accessing an object in a wider mode
                 causes the high-order bits to become undefined.  So they are
                 not known to be zero.  */
              if (GET_MODE_PRECISION (GET_MODE (x))
                  > GET_MODE_PRECISION (inner_mode))
                nonzero |= (GET_MODE_MASK (GET_MODE (x))
                            & ~GET_MODE_MASK (inner_mode));
            }
        }
      break;

    case ASHIFTRT:
    case LSHIFTRT:
    case ASHIFT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
         that aren't in GET_MODE (x) are always significant.  The rest of the
         nonzero bits are those that are significant in the operand of
         the shift when shifted the appropriate number of bits.  This
         shows that high-order bits are cleared by the right shift and
         low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
        {
          enum machine_mode inner_mode = GET_MODE (x);
          unsigned int width = GET_MODE_PRECISION (inner_mode);
          int count = INTVAL (XEXP (x, 1));
          unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
          unsigned HOST_WIDE_INT op_nonzero
            = cached_nonzero_bits (XEXP (x, 0), mode,
                                   known_x, known_mode, known_ret);
          unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
          unsigned HOST_WIDE_INT outer = 0;

          if (mode_width > width)
            outer = (op_nonzero & nonzero & ~mode_mask);

          if (code == LSHIFTRT)
            inner >>= count;
          else if (code == ASHIFTRT)
            {
              inner >>= count;

              /* If the sign bit may have been nonzero before the shift, we
                 need to mark all the places it could have been copied to
                 by the shift as possibly nonzero.  */
              if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
                inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
                         << (width - count);
            }
          else if (code == ASHIFT)
            inner <<= count;
          else
            inner = ((inner << (count % width)
                      | (inner >> (width - (count % width)))) & mode_mask);

          nonzero &= (outer | inner);
        }
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero
          |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero
          |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
        unsigned HOST_WIDE_INT nonzero_true
          = cached_nonzero_bits (XEXP (x, 1), mode,
                                 known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero_true) != nonzero)
          nonzero &= nonzero_true
                     | cached_nonzero_bits (XEXP (x, 2), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}

/* See the macro definition above.  */
#undef cached_num_sign_bit_copies

/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode,
                            const_rtx known_x,
                            enum machine_mode known_mode,
                            unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return
          num_sign_bit_copies1 (x, mode, x1, mode,
                                cached_num_sign_bit_copies (x1, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      if (ARITHMETIC_P (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}

/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE; if MODE is
   VOIDmode, X will be used in its own mode.  The returned value will always
   be between 1 and the number of bits in MODE.  */

static unsigned int
num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
                      enum machine_mode known_mode,
                      unsigned int known_ret)
{
  enum rtx_code code = GET_CODE (x);
  unsigned int bitwidth = GET_MODE_PRECISION (mode);
  int num0, num1, result;
  unsigned HOST_WIDE_INT nonzero;

  /* If we weren't given a mode, use the mode of X.  If the mode is still
     VOIDmode, we don't know anything.  Likewise if one of the modes is
     floating-point.  */

  if (mode == VOIDmode)
    mode = GET_MODE (x);

  if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return 1;

  /* For a smaller object, just ignore the high bits.  */
  if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
    {
      num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
                                         known_x, known_mode, known_ret);
      return MAX (1,
                  num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
    }

  if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
    {
#ifndef WORD_REGISTER_OPERATIONS
      /* If this machine does not do all register operations on the entire
         register and MODE is wider than the mode of X, we can say nothing
         at all about the high-order bits.  */
      return 1;
#else
      /* Likewise on machines that do, if the mode of the object is smaller
         than a word and loads of that size don't sign extend, we can say
         nothing about the high order bits.  */
      if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
#ifdef LOAD_EXTEND_OP
          && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
#endif
          )
        return 1;
#endif
    }

  switch (code)
    {
    case REG:

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend signed and this is a pointer in Pmode, say that
         all the bits above ptr_mode are known to be sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
          && mode == Pmode && REG_POINTER (x))
        return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
#endif

      {
        unsigned int copies_for_hook = 1, copies = 1;
        rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
                                                         known_mode, known_ret,
                                                         &copies_for_hook);

        if (new_rtx)
          copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
                                               known_mode, known_ret);

        if (copies > 1 || copies_for_hook > 1)
          return MAX (copies, copies_for_hook);

        /* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
        return MAX (1, ((int) bitwidth
                        - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
#endif
      break;

    case CONST_INT:
      /* If the constant is negative, take its 1's complement and remask.
         Then see how many zero bits we have.  */
      nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
          && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
        nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
         and we are looking at it in a wider mode, we know that at least the
         high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
        {
          num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
                                             known_x, known_mode, known_ret);
          return MAX ((int) bitwidth
                      - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
                      num0);
        }

      /* For a smaller object, just ignore the high bits.  */
      if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
        {
          num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
                                             known_x, known_mode, known_ret);
          return
            MAX (1, (num0
                     - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
                              - bitwidth)));
        }

#ifdef WORD_REGISTER_OPERATIONS
#ifdef LOAD_EXTEND_OP
      /* For paradoxical SUBREGs on machines where all register operations
         affect the entire register, just look inside.  Note that we are
         passing MODE to the recursive call, so the number of sign bit copies
         will remain relative to that mode, not the inner mode.  */

      /* This works only if loads sign extend.  Otherwise, if we get a
         reload for the inner part, it may be loaded from the stack, and
         then we lose all sign bit copies that existed before the store
         to the stack.  */

      if (paradoxical_subreg_p (x)
          && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
          && MEM_P (SUBREG_REG (x)))
        return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
                                           known_x, known_mode, known_ret);
#endif
#endif
      break;

    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
        return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;

    case SIGN_EXTEND:
      return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
              + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
                                            known_x, known_mode, known_ret));

    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
                                         known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
                                    - bitwidth)));

    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);

    case ROTATE:       case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
         of sign bit copies, we can just subtract that amount from the
         number.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < (int) bitwidth)
        {
          num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                             known_x, known_mode, known_ret);
          return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
                                 : (int) bitwidth - INTVAL (XEXP (x, 1))));
        }
      break;

    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
         is known to be positive, the number of sign bit copies is the
         same as that of the input.  Finally, if the input has just one bit
         that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
        return bitwidth;

      if (num0 > 1
          && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
        num0--;

      return num0;

    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
         MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
         the other term, we are guaranteed to have at least that many
         high-order zero bits.  */
      if (code == AND
          && num1 > 1
          && bitwidth <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (XEXP (x, 1))
          && (UINTVAL (XEXP (x, 1))
              & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
        return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
          && num1 > 1
          && bitwidth <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (XEXP (x, 1))
          && (UINTVAL (XEXP (x, 1))
              & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
        return num1;

      return MIN (num0, num1);

    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
         if we are subtracting 1 from a positive number, there will not
         be such a carry.  Furthermore, if the positive number is known to
         be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
          && bitwidth <= HOST_BITS_PER_WIDE_INT)
        {
          nonzero = nonzero_bits (XEXP (x, 0), mode);
          if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
            return (nonzero == 1 || nonzero == 0 ? bitwidth
                    : bitwidth - floor_log2 (nonzero) - 1);
        }

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

      return result;

    case MULT:
      /* The number of bits of the product is the sum of the number of
         bits of both terms.  However, unless one of the terms is known
         to be positive, we must allow for an additional bit since negating
         a negative number can remove one sign bit copy.  */

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (((nonzero_bits (XEXP (x, 0), mode)
                    & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
                  && ((nonzero_bits (XEXP (x, 1), mode)
                       & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
                      != 0))))
        result--;

      return MAX (1, result);

    case UDIV:
      /* The result must be <= the first operand.  If the first operand
         has the high bit set, we know nothing about the number of sign
         bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
                & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
        return 1;
      else
        return cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                           known_x, known_mode, known_ret);

    case UMOD:
      /* The result must be <= the second operand.  If the second operand
         has (or just might have) the high bit set, we know nothing about
         the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
                & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
        return 1;
      else
        return cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                           known_x, known_mode, known_ret);

    case DIV:
      /* Similar to unsigned division, except that we have to worry about
         the case where the divisor is negative, in which case we have
         to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                           known_x, known_mode, known_ret);
      if (result > 1
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (nonzero_bits (XEXP (x, 1), mode)
                  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
        result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                           known_x, known_mode, known_ret);
      if (result > 1
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (nonzero_bits (XEXP (x, 1), mode)
                  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
        result--;

      return result;

    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
         sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0
          && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
        num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;

    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
          || INTVAL (XEXP (x, 1)) < 0
          || INTVAL (XEXP (x, 1)) >= (int) bitwidth
          || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
        return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));

    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
                                         known_x, known_mode, known_ret);
      return MIN (num0, num1);

    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
         Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
          && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
        nonzero = (~nonzero) & GET_MODE_MASK (mode);
);
4962 return (nonzero
== 0 ? bitwidth
: bitwidth
- floor_log2 (nonzero
) - 1);
4968 /* If we haven't been able to figure it out by one of the above rules,
4969 see if some of the high-order bits are known to be zero. If so,
4970 count those bits and return one less than that amount. If we can't
4971 safely compute the mask for this mode, always return BITWIDTH. */
4973 bitwidth
= GET_MODE_PRECISION (mode
);
4974 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4977 nonzero
= nonzero_bits (x
, mode
);
4978 return nonzero
& ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))
4979 ? 1 : bitwidth
- floor_log2 (nonzero
) - 1;
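/* For illustration (not in the original sources): on a target where SImode
   is 32 bits wide, (sign_extend:SI (reg:QI r)) has at least
   32 - 8 + 1 = 25 sign bit copies, and (ashiftrt:SI (reg:SI r)
   (const_int 28)) at least MIN (32, 1 + 28) = 29.  A hypothetical use:  */
#if 0
  /* Sketch only: a value with as many sign bit copies as its mode is wide
     can only be 0 or -1, i.e. it is usable as a boolean mask.  */
  if (num_sign_bit_copies (x, SImode) == GET_MODE_PRECISION (SImode))
    /* ... X is known to be all-zeros or all-ones ... */;
#endif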
/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx x = XVECEXP (pat, 0, i);
          if (GET_CODE (x) == SET)
            {
              if (set)
                return 0;
              set = x;
            }
        }
      if (!set)
        return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonizing it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
                        rtx_insn **earliest,
                        rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  enum machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
          || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
         && op1 == CONST0_RTX (GET_MODE (op0))
         && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

#ifdef HAVE_cc0
      /* If comparison with cc0, import actual comparison from compare
         insn.  */
      if (op0 == cc0_rtx)
        {
          if ((prev = prev_nonnote_insn (prev)) == 0
              || !NONJUMP_INSN_P (prev)
              || (set = single_set (prev)) == 0
              || SET_DEST (set) != cc0_rtx)
            return 0;

          op0 = SET_SRC (set);
          op1 = CONST0_RTX (GET_MODE (op0));
          if (earliest)
            *earliest = prev;
        }
#endif

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
        {
          op1 = XEXP (op0, 1);
          op0 = XEXP (op0, 0);
          continue;
        }
      else if (!REG_P (op0))
        break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
         stop if it isn't a single set or if it has a REG_INC note because
         we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
          || !NONJUMP_INSN_P (prev)
          || FIND_REG_INC_NOTE (prev, NULL_RTX)
          /* In cfglayout mode, there do not have to be labels at the
             beginning of a block, or jumps at the end, so the previous
             conditions would not stop us when we reach bb boundary.  */
          || BLOCK_FOR_INSN (prev) != bb)
        break;

      set = set_of (op0, prev);

      if (set
          && (GET_CODE (set) != SET
              || !rtx_equal_p (SET_DEST (set), op0)))
        break;

      /* If this is setting OP0, get what it sets it to if it looks
         relevant.  */
      if (set)
        {
          enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
          REAL_VALUE_TYPE fsfv;
#endif

          /* ??? We may not combine comparisons done in a CCmode with
             comparisons not done in a CCmode.  This is to aid targets
             like Alpha that have an IEEE compliant EQ instruction, and
             a non-IEEE compliant BEQ instruction.  The use of CCmode is
             actually artificial, simply to prevent the combination, but
             should not affect other platforms.

             However, we must allow VOIDmode comparisons to match either
             CCmode or non-CCmode comparison, because some ports have
             modeless comparisons inside branch patterns.

             ??? This mode check should perhaps look more like the mode check
             in simplify_comparison in combine.  */
          if (((GET_MODE_CLASS (mode) == MODE_CC)
               != (GET_MODE_CLASS (inner_mode) == MODE_CC))
              && mode != VOIDmode
              && inner_mode != VOIDmode)
            break;
          if (GET_CODE (SET_SRC (set)) == COMPARE
              || (((code == NE
                    || (code == LT
                        && val_signbit_known_set_p (inner_mode,
                                                    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
                    || (code == LT
                        && SCALAR_FLOAT_MODE_P (inner_mode)
                        && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
                            REAL_VALUE_NEGATIVE (fsfv)))
#endif
                    ))
                  && COMPARISON_P (SET_SRC (set))))
            x = SET_SRC (set);
          else if (((code == EQ
                     || (code == GE
                         && val_signbit_known_set_p (inner_mode,
                                                     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
                     || (code == GE
                         && SCALAR_FLOAT_MODE_P (inner_mode)
                         && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
                             REAL_VALUE_NEGATIVE (fsfv)))
#endif
                     ))
                   && COMPARISON_P (SET_SRC (set)))
            {
              reverse_code = 1;
              x = SET_SRC (set);
            }
          else if ((code == EQ || code == NE)
                   && GET_CODE (SET_SRC (set)) == XOR)
            /* Handle sequences like:

               (set op0 (xor X Y))
               ...(eq|ne op0 (const_int 0))...

               in which case:

               (eq op0 (const_int 0)) reduces to (eq X Y)
               (ne op0 (const_int 0)) reduces to (ne X Y)

               This is the form used by MIPS16, for example.  */
            x = SET_SRC (set);
          else
            break;
        }

      else if (reg_set_p (op0, prev))
        /* If this sets OP0, but not directly, we have to give up.  */
        break;

      if (x)
        {
          /* If the caller is expecting the condition to be valid at INSN,
             make sure X doesn't change before INSN.  */
          if (valid_at_insn_p)
            if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
              break;
          if (COMPARISON_P (x))
            code = GET_CODE (x);
          if (reverse_code)
            {
              code = reversed_comparison_code (x, prev);
              if (code == UNKNOWN)
                return 0;
              reverse_code = 0;
            }

          op0 = XEXP (x, 0), op1 = XEXP (x, 1);
          if (earliest)
            *earliest = prev;
        }
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
        = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
        {
        case LE:
          if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
            code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
          break;

        /* When cross-compiling, const_val might be sign-extended from
           BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
        case GE:
          if ((const_val & max_val)
              != ((unsigned HOST_WIDE_INT) 1
                  << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
            code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
          break;

        case LEU:
          if (uconst_val < max_val)
            code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
          break;

        case GEU:
          if (uconst_val != 0)
            code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
          break;

        default:
          break;
        }
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
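/* For illustration (not in the original sources): rule (4) above rewrites
   ordered comparisons against constants as strict ones, e.g.
   (le (reg:SI r) (const_int 4)) becomes (lt (reg:SI r) (const_int 5)) and
   (geu (reg:SI r) (const_int 1)) becomes (gtu (reg:SI r) (const_int 0)),
   provided the adjusted constant does not overflow the operand's mode.  */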
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
               int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
                                 allow_cc_mode, valid_at_insn_p);
}
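/* For illustration (not in the original sources): a minimal caller sketch,
   assuming JUMP is known to be a conditional jump insn.  */
#if 0
  rtx_insn *earliest;
  rtx cond = get_condition (jump, &earliest, /*allow_cc_mode=*/0,
                            /*valid_at_insn_p=*/0);
  if (cond != NULL_RTX)
    /* COND is now e.g. (lt (reg:SI r) (const_int 5)), valid at EARLIEST.  */;
#endif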
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  enum machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
         mode = GET_MODE_WIDER_MODE (mode))
      {
        enum machine_mode i;

        /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
           extends to the next widest mode.  */
        gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
                    || GET_MODE_WIDER_MODE (mode) == in_mode);

        /* We are in in_mode.  Count how many bits outside of mode
           have to be copies of the sign-bit.  */
        for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
          {
            enum machine_mode wider = GET_MODE_WIDER_MODE (i);

            if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
                /* We can only check sign-bit copies starting from the
                   top-bit.  In order to be able to check the bits we
                   have already seen we pretend that subsequent bits
                   have to be sign-bit copies too.  */
                || num_sign_bit_copies_in_rep[in_mode][mode])
              num_sign_bit_copies_in_rep[in_mode][mode]
                += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
          }
      }
}
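/* For illustration (not in the original sources): on a target whose
   mode_rep_extended hook returns SIGN_EXTEND for (SImode, DImode), as
   MIPS64 does, the loops above record
   num_sign_bit_copies_in_rep[DImode][SImode] == 64 - 32 == 32: an SImode
   value held in a DImode register must keep its upper 32 bits equal to
   the sign bit.  */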
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (enum machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
          >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
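/* For illustration (not in the original sources): continuing the example
   above, truncated_to_mode (SImode, x) holds for a DImode X carrying at
   least 32 + 1 sign bit copies, so the truncation can be performed as a
   simple mode switch.  */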
/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  size_t i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
        /* No subrtxes.  Leave start and count as 0.  */
        return true;
      if (format[i] == 'E' || format[i] == 'V')
        return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}
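/* For illustration (not in the original sources): PLUS has format string
   "ee", giving bounds { start = 0, count = 2 }; SIGN_EXTEND ("e") gets
   { 0, 1 }; PARALLEL ("E") fails the test above and is later given
   count == UCHAR_MAX so that the subrtx iterators fall back to the
   generic path.  */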
/* Initialize non_rtx_starting_operands, which is used to speed up
   for_each_rtx, and rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      const char *format = GET_RTX_FORMAT (i);
      const char *first = strpbrk (format, "eEV");
      non_rtx_starting_operands[i] = first ? first - format : -1;
      if (!setup_reg_subrtx_bounds (i))
        rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
        rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
        return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
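/* For illustration (not in the original sources):
   low_bitmask_len (SImode, 0xff) == 8, because 0xff + 1 is an exact power
   of 2, whereas 0xf0 (field not at the low end) and 0x5 (non-contiguous
   bits) both return -1 since m + 1 is then not a power of 2.  */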
/* Return the mode of MEM's address.  */

enum machine_mode
get_address_mode (rtx mem)
{
  enum machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return mode;
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}
/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
        {
          /* In this case the CONST_INT holds both target words.
             Extract the bits from it into two word-sized pieces.
             Sign extend each half to HOST_WIDE_INT.  */
          unsigned HOST_WIDE_INT low, high;
          unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
          unsigned bits_per_word = BITS_PER_WORD;

          /* Set sign_bit to the most significant bit of a word.  */
          sign_bit = 1;
          sign_bit <<= bits_per_word - 1;

          /* Set mask so that all bits of the word are set.  We could
             have used 1 << BITS_PER_WORD instead of basing the
             calculation on sign_bit.  However, on machines where
             HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
             compiler warning, even though the code would never be
             executed.  */
          mask = sign_bit << 1;
          mask--;

          /* Set sign_extend as any remaining bits.  */
          sign_extend = ~mask;

          /* Pick the lower word and sign-extend it.  */
          low = INTVAL (value);
          low &= mask;
          if (low & sign_bit)
            low |= sign_extend;

          /* Pick the higher word, shifted to the least significant
             bits, and sign-extend it.  */
          high = INTVAL (value);
          high >>= bits_per_word - 1;
          high >>= 1;
          high &= mask;
          if (high & sign_bit)
            high |= sign_extend;

          /* Store the words in the target machine order.  */
          if (WORDS_BIG_ENDIAN)
            {
              *first = GEN_INT (high);
              *second = GEN_INT (low);
            }
          else
            {
              *first = GEN_INT (low);
              *second = GEN_INT (high);
            }
        }
      else
        {
          /* The rule for using CONST_INT for a wider mode
             is that we regard the value as signed.
             So sign-extend it.  */
          rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
          if (WORDS_BIG_ENDIAN)
            {
              *first = high;
              *second = value;
            }
          else
            {
              *first = value;
              *second = high;
            }
        }
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
         properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
        {
          *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
          *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
        }
      else
        {
          *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
          *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
        }
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
        {
          *first = const0_rtx;
          *second = value;
        }
      else
        {
          *first = value;
          *second = const0_rtx;
        }
    }
  else if (GET_MODE (value) == VOIDmode
           /* This is the old way we did CONST_DOUBLE integers.  */
           || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
         So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
        {
          *first = GEN_INT (CONST_DOUBLE_HIGH (value));
          *second = GEN_INT (CONST_DOUBLE_LOW (value));
        }
      else
        {
          *first = GEN_INT (CONST_DOUBLE_LOW (value));
          *second = GEN_INT (CONST_DOUBLE_HIGH (value));
        }
    }
  else
    {
      REAL_VALUE_TYPE r;
      long l[2];
      REAL_VALUE_FROM_CONST_DOUBLE (r, value);

      /* Note, this converts the REAL_VALUE_TYPE to the target's
         format, splits up the floating point double and outputs
         exactly 32 bits of it into each of l[0] and l[1] --
         not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);

      /* If 32 bits is an entire word for the target, but not for the host,
         then sign-extend on the host so that the number will look the same
         way on the host that it would on the target.  See for instance
         simplify_unary_operation.  The #if is needed to avoid compiler
         warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
        {
          if (l[0] & ((long) 1 << 31))
            l[0] |= ((long) (-1) << 32);
          if (l[1] & ((long) 1 << 31))
            l[1] |= ((long) (-1) << 32);
        }
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
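/* For illustration (not in the original sources): on a 64-bit host
   targeting a 32-bit little-endian machine, splitting the CONST_INT
   0x123456789abcdef0 takes the first branch above and yields
   *first = GEN_INT (0xffffffff9abcdef0) (the low word, sign-extended on
   the host because bit 31 is set) and *second = GEN_INT (0x12345678).  */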
/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      enum machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));

      return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
    }
  return false;
}
/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
        /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
           used to convert between pointer sizes.  */
        loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
        /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
           acts as a combined truncation and extension.  */
        loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
        /* (and ... (const_int -X)) is used to align to X bytes.  */
        loc = &XEXP (*loc, 0);
      else if (code == SUBREG
               && !OBJECT_P (SUBREG_REG (*loc))
               && subreg_lowpart_p (*loc))
        /* (subreg (operator ...) ...) inside an AND is used for mode
           conversion too.  */
        loc = &SUBREG_REG (*loc);
      else
        {
          if (outer_code)
            *outer_code = code;
          return loc;
        }
    }
}
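/* For illustration (not in the original sources): for the aligned address
   (and:SI (plus:SI (reg:SI base) (reg:SI index)) (const_int -4)), the AND
   is an alignment mutation, so the function returns a pointer to the
   inner PLUS; *OUTER_CODE, if requested, receives the code of that
   innermost expression.  */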
/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
          || code == ASHIFT
          /* Needed by ARM targets.  */
          || code == ASHIFTRT
          || code == LSHIFTRT
          || code == ROTATE
          || code == ROTATERT);
}
/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG)
    return inner;
  return 0;
}

/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG)
    return inner;
  return 0;
}
/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}
/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}
/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}
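/* For illustration (not in the original sources): a stack adjustment such
   as (pre_modify (reg sp) (plus (reg sp) (const_int -16))) decomposes with
   base == base_term == the outer SP use, base_term2 == the SP inside the
   PLUS, and the constant step -16 recorded as the displacement.  */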
/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}
/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, enum machine_mode mode, addr_space_t as,
          enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
         or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
        return base_p ? 1 : -1;
    }
  return 0;
}
/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
                                        ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
        set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
        set_address_segment (info, loc, inner);
      else
        {
          /* The only other possibilities are a base or an index.  */
          rtx *base_term = get_base_term (inner);
          rtx *index_term = get_index_term (inner);
          gcc_assert (base_term || index_term);
          if (!base_term)
            set_address_index (info, loc, index_term);
          else if (!index_term)
            set_address_base (info, loc, base_term);
          else
            {
              gcc_assert (base_term == index_term);
              ops[out] = loc;
              inner_ops[out] = base_term;
              ++out;
            }
        }
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
         the base.  If we were confident that another term was the base
         or index, treat the remaining operand as the other kind.  */
      if (!info->base)
        set_address_base (info, ops[0], inner_ops[0]);
      else
        set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
                    GET_CODE (*ops[1]))
          >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
                       GET_CODE (*ops[0])))
        {
          set_address_base (info, ops[0], inner_ops[0]);
          set_address_index (info, ops[1], inner_ops[1]);
        }
      else
        {
          set_address_base (info, ops[1], inner_ops[1]);
          set_address_index (info, ops[0], inner_ops[0]);
        }
    }
  else
    gcc_assert (out == 0);
}
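/* For illustration (not in the original sources): for a scaled address
   such as (plus:SI (mult:SI (reg:SI i) (const_int 4)) (reg:SI b)), the
   MULT term can only be an index (get_base_term rejects it), so it becomes
   info->index, and the bare register is then classified as the base by the
   OUT == 1 case above.  */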
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, enum machine_mode mode,
                   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}
/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}
/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
                     MEM_ADDR_SPACE (x), MEM);
}
/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
                     info->addr_outer_code);
}
/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
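/* For illustration (not in the original sources):
   (mult (reg i) (const_int 4)) has scale 4, (ashift (reg i) (const_int 2))
   has scale 1 << 2 == 4, a bare (reg i) has scale 1, and anything more
   complicated yields 0.  */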
/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
/* Return 1 if *X is a thread-local symbol.  */

static int
tls_referenced_p_1 (rtx *x, void *)
{
  return GET_CODE (*x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*x) != 0;
}

/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (rtx x)
{
  if (!targetm.have_tls)
    return false;

  return for_each_rtx (&x, &tls_referenced_p_1, 0);
}