/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-config.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"
#include "hard-reg-set.h"
#include "function-abi.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
                                                const_rtx, machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
                                          const_rtx, machine_mode,
                                          unsigned int);

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep [MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      /* Add to the stack while there is still room.  */
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }

      /* Otherwise start a new list on the heap.  */
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         code below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}
template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

int
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the routine.  */

static poly_int64
get_initial_register_offset (int from, int to)
{
  static const struct elim_table_t
    {
      const int from;
      const int to;
    } table[] = ELIMINABLE_REGS;
  poly_int64 offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET before the epilogue
     is completed, but we need to give at least an estimate for the stack
     pointer based on the frame size.  */
  if (!epilogue_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
        return offset1;
      else if (from == STACK_POINTER_REGNUM)
        return - offset1;
      else
        return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
      if (table[i].from == from)
        {
          if (table[i].to == to)
            {
              INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                          offset1);
              return offset1;
            }
          for (j = 0; j < ARRAY_SIZE (table); j++)
            {
              if (table[j].to == to
                  && table[j].from == table[i].to)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return offset1 + offset2;
                }
              if (table[j].from == to
                  && table[j].to == table[i].to)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return offset1 - offset2;
                }
            }
        }
      else if (table[i].to == from)
        {
          if (table[i].from == to)
            {
              INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                          offset1);
              return - offset1;
            }
          for (j = 0; j < ARRAY_SIZE (table); j++)
            {
              if (table[j].to == to
                  && table[j].from == table[i].from)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return - offset1 + offset2;
                }
              if (table[j].from == to
                  && table[j].to == table[i].from)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return - offset1 - offset2;
                }
            }
        }

  /* If the requested register combination was not found,
     try a different more simple combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;
}
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);
  gcc_checking_assert (mode == BLKmode || known_size_p (size));
  poly_int64 const_x1;

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && mode != BLKmode)
    {
      poly_int64 actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
        {
          tree decl;
          poly_int64 decl_size;

          if (maybe_lt (offset, 0))
            return 1;
          if (!known_size_p (size))
            return maybe_ne (offset, 0);

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            {
              if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
                decl_size = -1;
            }
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (!known_size_p (decl_size) || known_eq (decl_size, 0)
                  ? maybe_ne (offset, 0)
                  : !known_subrange_p (offset, size, 0, decl_size));
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        {
#ifdef RED_ZONE_SIZE
          poly_int64 red_zone_size = RED_ZONE_SIZE;
#else
          poly_int64 red_zone_size = 0;
#endif
          poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
          poly_int64 low_bound, high_bound;

          if (!known_size_p (size))
            return 1;

          if (x == frame_pointer_rtx)
            {
              if (FRAME_GROWS_DOWNWARD)
                {
                  high_bound = targetm.starting_frame_offset ();
                  low_bound  = high_bound - get_frame_size ();
                }
              else
                {
                  low_bound  = targetm.starting_frame_offset ();
                  high_bound = low_bound + get_frame_size ();
                }
            }
          else if (x == hard_frame_pointer_rtx)
            {
              poly_int64 sp_offset
                = get_initial_register_offset (STACK_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);
              poly_int64 ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = sp_offset - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = sp_offset + red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else if (x == stack_pointer_rtx)
            {
              poly_int64 ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound  = - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = red_zone_size + stack_boundary;
              low_bound  = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                           - crtl->args.size
#endif
                           - stack_boundary;
#endif
            }
          else
            {
              /* We assume that accesses are safe to at least the
                 next stack boundary.
                 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + stack_boundary;
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - crtl->args.size - stack_boundary;
#else
              low_bound  = FIRST_PARM_OFFSET (current_function_decl)
                           - stack_boundary;
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + crtl->args.size + stack_boundary;
#endif
            }

          if (known_ge (offset, low_bound)
              && known_le (offset, high_bound - size))
            return 0;
          return 1;
        }
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a const unspec without offset.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && GET_CODE (XEXP (x, 1)) == CONST
          && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
          && known_eq (offset, 0))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (poly_int_rtx_p (XEXP (x, 1), &const_x1)
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + const_x1,
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
}
/* Return true if X contains a MEM subrtx.  */

bool
contains_mem_rtx_p (rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (MEM_P (*iter))
      return true;

  return false;
}
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

int
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (const rtx_insn *insn)
{
  rtx x = PATTERN (insn);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
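
/* A sketch of typical use (assuming INSN is already known to satisfy
   CALL_P): the MEM operand of the returned CALL holds the address of
   the called function.

       rtx call = get_call_rtx_from (insn);
       if (call != NULL_RTX)
         process_callee_address (XEXP (XEXP (call, 0), 0));

   (process_callee_address is an illustrative name, not a GCC API.)  */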
/* Get the declaration of the function called by INSN.  */

tree
get_call_fndecl (const rtx_insn *insn)
{
  rtx note, datum;

  note = find_reg_note (insn, REG_CALL_DECL, NULL_RTX);
  if (note == NULL_RTX)
    return NULL_TREE;

  datum = XEXP (note, 0);
  if (datum != NULL_RTX)
    return SYMBOL_REF_DECL (datum);

  return NULL_TREE;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
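
/* For example, splitting (const (plus (symbol_ref "foo") (const_int 8)))
   yields the SYMBOL_REF as the base and (const_int 8) as the offset,
   while a plain REG comes back unchanged with const0_rtx as the offset.
   A sketch, where ADDR stands for any candidate rtx:

       rtx base, offset;
       split_const (addr, &base, &offset);
       HOST_WIDE_INT off = INTVAL (offset);  */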
/* Express integer value X as some value Y plus a polynomial offset,
   where Y is either const0_rtx, X or something within X (as opposed
   to a new rtx).  Return the Y and store the offset in *OFFSET_OUT.  */

rtx
strip_offset (rtx x, poly_int64_pod *offset_out)
{
  rtx base = const0_rtx;
  rtx test = x;
  if (GET_CODE (test) == CONST)
    test = XEXP (test, 0);
  if (GET_CODE (test) == PLUS)
    {
      base = XEXP (test, 0);
      test = XEXP (test, 1);
    }
  if (poly_int_rtx_p (test, offset_out))
    return base;
  *offset_out = 0;
  return x;
}
/* Return the argument size in REG_ARGS_SIZE note X.  */

poly_int64
get_args_size (const_rtx x)
{
  gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
  return rtx_to_poly_int64 (XEXP (x, 0));
}
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
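
/* A sketch of typical use: counting how often the rtx for pseudo REGNO
   occurs in an insn's pattern outside SET destinations (assumes REGNO
   names an existing pseudo):

       int n = count_occurrences (PATTERN (insn), regno_reg_rtx[regno], 0);  */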
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == label_ref_label (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    CASE_CONST_ANY:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && !read_modify_subreg_p (SET_DEST (body)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Return true if REG is set or clobbered inside INSN.  */

int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && (insn_callee_abi (as_a<const rtx_insn *> (insn))
                       .clobbers_reg_p (GET_MODE (reg), REGNO (reg))))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  /* There are no REG_INC notes for SP autoinc.  */
  if (reg == stack_pointer_rtx && INSN_P (insn))
    {
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
        {
          rtx mem = *iter;
          if (mem
              && MEM_P (mem)
              && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
            {
              if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
                return true;
              iter.skip_subrtxes ();
            }
        }
    }

  return set_of (reg, insn) != NULL_RTX;
}
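
/* A sketch of typical use: before moving a computation across INSN, a
   pass can check that none of its inputs is overwritten there.  OP0 and
   OP1 stand for the candidate's operand rtxes; the transformation
   itself is whatever the caller performs:

       if (!reg_set_p (op0, insn) && !reg_set_p (op1, insn))
         move_computation_past (insn);  */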
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

bool
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  const rtx_insn *insn;

  if (start == end)
    return false;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return false;

    case PC:
    case CC0:
      return true;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return true;
      if (MEM_READONLY_P (x))
        return false;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return true;
      return false;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return true;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return true;
    }

  return false;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

bool
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return false;

    case PC:
    case CC0:
      return true;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return true;
      if (MEM_READONLY_P (x))
        return false;
      if (memory_modified_in_insn_p (x, insn))
        return true;
      return false;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return true;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return true;
    }

  return false;
}
/* Return true if X is a SUBREG and if storing a value to X would
   preserve some of its SUBREG_REG.  For example, on a normal 32-bit
   target, using a SUBREG to store to one half of a DImode REG would
   preserve the other half.  */

bool
read_modify_subreg_p (const_rtx x)
{
  if (GET_CODE (x) != SUBREG)
    return false;
  poly_uint64 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
  poly_uint64 osize = GET_MODE_SIZE (GET_MODE (x));
  poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
  /* The inner and outer modes of a subreg must be ordered, so that we
     can tell whether they're paradoxical or partial.  */
  gcc_checking_assert (ordered_p (isize, osize));
  return (maybe_gt (isize, osize) && maybe_gt (isize, regsize));
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_pattern_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
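
/* For example, given an insn whose pattern is

       (set (strict_low_part (subreg:QI (reg:SI 60) 0)) (reg:QI 61))

   set_of (regno_reg_rtx[60], insn) returns the whole SET even though
   the destination is wrapped in STRICT_LOW_PART.  (Pseudos 60 and 61
   are illustrative.)  */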
/* Add all hard registers in X to *PSET.  */
static void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *) data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}
/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.

   IMPLICIT is true if we should include registers that are fully-clobbered
   by calls.  This should be used with caution, since it doesn't include
   partially-clobbered registers.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (insn, record_hard_reg_sets, pset);
  if (CALL_P (insn) && implicit)
    *pset |= insn_callee_abi (insn).full_reg_clobbers ();
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}
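
/* Callers normally reach this through the single_set wrapper in rtl.h,
   which fast-paths a pattern that is a bare SET and falls back to
   single_set_2 for PARALLELs.  A sketch of the usual idiom, where
   process_set stands for whatever the pass does with the one set:

       rtx set = single_set (insn);
       if (set != NULL_RTX)
         process_set (SET_DEST (set), SET_SRC (set));  */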
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      poly_int64 c0 = rtx_to_poly_int64 (XVECEXP (par, 0, 0));
      poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (maybe_ne (rtx_to_poly_int64 (XVECEXP (par, 0, i)), c0 + i))
          return 0;
      return
        REG_CAN_CHANGE_MODE_P (REGNO (dst), GET_MODE (src0), GET_MODE (dst))
        && simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                                  offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
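
/* For example, (set (reg:SI 3) (reg:SI 3)) is a no-op by the final REG
   test above, whereas (set (mem:SI (post_inc:SI (reg:SI 3)))
   (mem:SI (post_inc:SI (reg:SI 3)))) is not, because the address has
   side effects.  A sketch of typical use together with single_set:

       rtx set = single_set (insn);
       if (set != NULL_RTX && set_noop_p (set))
         delete_insn (insn);  */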
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

bool
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return false;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
           || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
               && x_regno == ARG_POINTER_REGNUM)
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return true;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return true;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return false;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return true;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return true;
        }
    }
  return false;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X cannot
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case CLOBBER:
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
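
/* For example, on a target where (reg:DI 0) occupies hard registers 0
   and 1, reg_overlap_mentioned_p ((reg:DI 0), (reg:SI 1)) is nonzero
   even though the two rtxes are not rtx_equal_p.  A sketch, where
   record_dependence is an illustrative name:

       if (reg_overlap_mentioned_p (SET_DEST (set), SET_SRC (set)))
         record_dependence (insn);  */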
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_pattern_stores (const_rtx x,
                     void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_pattern_stores (XVECEXP (x, 0, i), fun, data);
}
/* Same, but for an instruction.  If the instruction is a call, include
   any CLOBBERs in its CALL_INSN_FUNCTION_USAGE.  */

void
note_stores (const rtx_insn *insn,
             void (*fun) (rtx, const_rtx, void *), void *data)
{
  if (CALL_P (insn))
    for (rtx link = CALL_INSN_FUNCTION_USAGE (insn);
         link; link = XEXP (link, 1))
      if (GET_CODE (XEXP (link, 0)) == CLOBBER)
        note_pattern_stores (XEXP (link, 0), fun, data);
  note_pattern_stores (PATTERN (insn), fun, data);
}
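
/* A sketch of the usual callback idiom (record_hard_reg_sets above is
   a real in-tree example): collect every register written by INSN into
   a bitmap handed through DATA.  note_dest is an illustrative name:

       static void
       note_dest (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
       {
         if (REG_P (x))
           bitmap_set_bit ((bitmap) data, REGNO (x));
       }

   and at a call site: note_stores (insn, note_dest, (void *) written);  */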
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const rtx_insn *insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register, is a
   complete rather than read-modify-write destination, and contains
   register TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
{
  rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
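
/* A sketch of typical use: fetch the REG_EQUAL note attached to INSN,
   if any, and inspect the equivalent expression stored as its datum.
   process_equivalence is an illustrative name:

       rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
       if (note != NULL_RTX)
         process_equivalence (insn, XEXP (note, 0));  */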
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}

/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
                                       datum, REG_NOTES (insn));
}
/* Add a REG_ARGS_SIZE note to INSN with value VALUE.  */

void
add_args_size_note (rtx_insn *insn, poly_int64 value)
{
  gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
  add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
}
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Duplicate NOTE and return the copy.  */
rtx
duplicate_reg_note (rtx note)
{
  reg_note kind = REG_NOTE_KIND (note);

  if (GET_CODE (note) == INT_LIST)
    return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
  else if (GET_CODE (note) == EXPR_LIST)
    return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
  else
    return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx_insn *insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
   Return true if any note has been removed.  */

bool
remove_reg_equal_equiv_notes (rtx_insn *insn)
{
  rtx *loc;
  bool ret = false;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        {
          *loc = XEXP (*loc, 1);
          ret = true;
        }
      else
        loc = &XEXP (*loc, 1);
    }
  return ret;
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   return true if it is found.  A simple equality test is used to determine
   if NODE matches.  */

bool
in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return true;

  return false;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx_expr_list *prev = NULL;

  while (temp)
    {
      if (node == temp->element ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
  rtx_insn_list *temp = *listp;
  rtx_insn_list *prev = NULL;

  while (temp)
    {
      if (node == temp->insn ())
	{
	  /* Splice the node out of the list.  */
	  if (prev)
	    XEXP (prev, 1) = temp->next ();
	  else
	    *listp = temp->next ();

	  return;
	}

      prev = temp;
      temp = temp->next ();
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no instructions
   or register uses should be moved or combined across them.  This includes
   only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (volatile_insn_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (volatile_insn_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (volatile_refs_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (volatile_refs_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case VAR_LOCATION:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
	 when some combination can't be done.  If we see one, don't think
	 that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case SET:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (side_effects_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (side_effects_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero value means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
    case ASM_INPUT:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
	  && MEM_VOLATILE_P (x)
	  && XEXP (x, 0) == stack_pointer_rtx)
	return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
	     reference; moving it out of context such as when moving code
	     when optimizing, might cause its address to become invalid.  */
	  code_changed
	  || !MEM_NOTRAP_P (x))
	{
	  poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
	  return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
					GET_MODE (x), code_changed);
	}
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (x))
	return 1;
      if (FLOAT_MODE_P (GET_MODE (x)))
	return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
	return 1;
      if (GET_CODE (XEXP (x, 1)) == CONST_VECTOR)
	{
	  /* For CONST_VECTOR, return 1 if any element is or might be zero.  */
	  unsigned int n_elts;
	  rtx op = XEXP (x, 1);
	  if (!GET_MODE_NUNITS (GET_MODE (op)).is_constant (&n_elts))
	    {
	      if (!CONST_VECTOR_DUPLICATE_P (op))
		return 1;
	      for (unsigned i = 0; i < (unsigned int) XVECLEN (op, 0); i++)
		if (CONST_VECTOR_ENCODED_ELT (op, i) == const0_rtx)
		  return 1;
	    }
	  else
	    for (unsigned i = 0; i < n_elts; i++)
	      if (CONST_VECTOR_ELT (op, i) == const0_rtx)
		return 1;
	}
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
	 certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
	break;
      /* ??? There is no machine independent way to check for tests that trap
	 when COMPARE is used, though many targets do make this distinction.
	 For instance, sparc uses CCFPE for compares which generate exceptions
	 and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (x))
	return 1;
      /* But often the compare has some CC mode, so check operand
	 modes as well.  */
      if (HONOR_NANS (XEXP (x, 0))
	  || HONOR_NANS (XEXP (x, 1)))
	return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (x))
	return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (XEXP (x, 0))
	  || HONOR_SNANS (XEXP (x, 1)))
	return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
	return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
    case VEC_MERGE:
    case VEC_SELECT:
    case VEC_CONCAT:
    case VEC_DUPLICATE:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
	return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (may_trap_p_1 (XEXP (x, i), flags))
	    return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (may_trap_p_1 (XVECEXP (x, i, j), flags))
	      return 1;
	}
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
	if (s->d == 1)
	  return *s->ip;
	else
	  return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      ((set (reg:SI) (mem:SI (%fp - 7)))
       ...
       (set (reg:QI) (mem:QI (%fp - 7))))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case SCRATCH:
    case PC:
    case CC0:
    CASE_CONST_ANY:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      return 0;

    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
	{
	  if (inequality_comparisons_p (XEXP (x, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (inequality_comparisons_p (XVECEXP (x, i, j)))
	      return 1;
	}
    }

  return 0;
}
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.

   ALL_REGS is true if we want to replace all REGs equal to FROM, not just
   those pointer-equal ones.  */

rtx
replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (all_regs
      && REG_P (x)
      && REG_P (from)
      && REGNO (x) == REGNO (from))
    {
      gcc_assert (GET_MODE (x) == GET_MODE (from));
      return to;
    }
  else if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_subreg (GET_MODE (x), new_rtx,
			       GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
	  gcc_assert (x);
	}
      else
	SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					new_rtx, GET_MODE (XEXP (x, 0)));
	  gcc_assert (x);
	}
      else
	XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
					   from, to, all_regs);
    }

  return x;
}
/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
{
  /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
  rtx x = *loc;
  if (JUMP_TABLE_DATA_P (x))
    {
      x = PATTERN (x);
      rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
	{
	  rtx ref = RTVEC_ELT (vec, i);
	  if (XEXP (ref, 0) == old_label)
	    {
	      XEXP (ref, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
      return;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by the iterator because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
    JUMP_LABEL (x) = new_label;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (rtx x = *loc)
	{
	  if (GET_CODE (x) == SYMBOL_REF
	      && CONSTANT_POOL_ADDRESS_P (x))
	    {
	      rtx c = get_pool_constant (x);
	      if (rtx_referenced_p (old_label, c))
		{
		  /* Create a copy of constant C; replace the label inside
		     but do not update LABEL_NUSES because uses in constant pool
		     are not counted.  */
		  rtx new_c = copy_rtx (c);
		  replace_label (&new_c, old_label, new_label, false);

		  /* Add the new constant NEW_C to constant pool and replace
		     the old reference to constant by new reference.  */
		  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
		  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
		}
	    }

	  if ((GET_CODE (x) == LABEL_REF
	       || GET_CODE (x) == INSN_LIST)
	      && XEXP (x, 0) == old_label)
	    {
	      XEXP (x, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
    }
}
void
replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
		       rtx_insn *new_label, bool update_label_nuses)
{
  rtx insn_as_rtx = insn;
  replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
  gcc_checking_assert (insn_as_rtx == insn);
}
/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
	/* Check if a label_ref Y refers to label X.  */
	if (GET_CODE (y) == LABEL_REF
	    && LABEL_P (x)
	    && label_ref_label (y) == x)
	  return true;

	if (rtx_equal_p (x, y))
	  return true;

	/* If Y is a reference to pool constant traverse the constant.  */
	if (GET_CODE (y) == SYMBOL_REF
	    && CONSTANT_POOL_ADDRESS_P (y))
	  iter.substitute (get_pool_constant (y));
      }
  return false;
}
/* If INSN is a tablejump return true and store the label (before jump table) to
   *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
	     rtx_jump_table_data **tablep)
{
  if (!JUMP_P (insn))
    return false;

  rtx target = JUMP_LABEL (insn);
  if (target == NULL_RTX || ANY_RETURN_P (target))
    return false;

  rtx_insn *label = as_a <rtx_insn *> (target);
  rtx_insn *table = next_insn (label);
  if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
    return false;

  if (labelp)
    *labelp = label;
  if (tablep)
    *tablep = as_a <rtx_jump_table_data *> (table);
  return true;
}
/* For INSN known to satisfy tablejump_p, determine if it actually is a
   CASESI.  Return the insn pattern if so, NULL_RTX otherwise.  */

rtx
tablejump_casesi_pattern (const rtx_insn *insn)
{
  rtx tmp;

  if ((tmp = single_set (insn)) != NULL
      && SET_DEST (tmp) == pc_rtx
      && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
      && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF)
    return tmp;

  return NULL_RTX;
}
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
	      || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
	  && computed_jump_p_1 (XEXP (x, i)))
	return 1;

      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
	    return 1;
    }

  return 0;
}
/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const rtx_insn *insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
	return 0;

      if (GET_CODE (pat) == PARALLEL)
	{
	  int len = XVECLEN (pat, 0);
	  int has_use_labelref = 0;

	  for (i = len - 1; i >= 0; i--)
	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
		    == LABEL_REF))
	      {
		has_use_labelref = 1;
		break;
	      }

	  if (! has_use_labelref)
	    for (i = len - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
		return 1;
	}
      else if (GET_CODE (pat) == SET
	       && SET_DEST (pat) == pc_rtx
	       && computed_jump_p_1 (SET_SRC (pat)))
	return 1;
    }
  return 0;
}
/* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
   the equivalent add insn and pass the result to FN, using DATA as the
   final argument.  */

static int
for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
{
  rtx x = XEXP (mem, 0);
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_DEC:
    case POST_DEC:
      {
	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (-size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
	rtx r1 = XEXP (x, 0);
	rtx add = XEXP (x, 1);
	return fn (mem, x, r1, add, NULL, data);
      }

    default:
      gcc_unreachable ();
    }
}
/* Traverse X looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
		  for_each_inc_dec_fn fn,
		  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
	  && MEM_P (mem)
	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
	{
	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
	  if (res != 0)
	    return res;
	  iter.skip_subrtxes ();
	}
    }
  return 0;
}
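/* An illustrative (hypothetical, not part of the original sources) FN
   callback for the interface above, counting the autoinc operations in
   an insn pattern:

       static int
       count_inc_dec (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
		      rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
		      rtx srcoff ATTRIBUTE_UNUSED, void *arg)
       {
	 ++*(int *) arg;
	 return 0;	/* Zero continues the traversal.  */
       }

       int count = 0;
       for_each_inc_dec (PATTERN (insn), count_inc_dec, &count);  */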
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if ((tem = regno_use_in (regno, XEXP (x, i))))
	    return tem;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
	    return tem;
    }

  return NULL_RTX;
}
/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The more
   positive the value, the stronger the preference for being the first
   operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -10;
  if (code == CONST_WIDE_INT)
    return -9;
  if (code == CONST_POLY_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -8;
  if (code == CONST_FIXED)
    return -8;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
	return -7;
      if (code == CONST_WIDE_INT)
	return -6;
      if (code == CONST_POLY_INT)
	return -5;
      if (code == CONST_DOUBLE)
	return -5;
      if (code == CONST_FIXED)
	return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
	return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
	 of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
	  || (MEM_P (op) && MEM_POINTER (op)))
	return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
	 This helps to make things linear.  In particular,
	 (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
	 operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
	 is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
	return 1;
      /* FALLTHRU */

    default:
      return 0;
    }
}
/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
	  < commutative_operand_precedence (y));
}
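/* For example, since constants have the lowest precedence and registers
   rank above them, canonicalization based on swap_commutative_operands_p
   turns (plus (const_int 4) (reg)) into (plus (reg) (const_int 4)), and,
   per the RTX_COMM_ARITH case above, orders
   (and (and (reg) (reg)) (not (reg))) with the commutative subexpression
   first.  */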
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */
int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
	return 1;
    default:
      break;
    }
  return 0;
}
/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	  if (loc == &XVECEXP (in, i, j)
	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
	    return 1;
    }
  return 0;
}
/* Reinterpret a subreg as a bit extraction from an integer and return
   the position of the least significant bit of the extracted value.
   In other words, if the extraction were performed as a shift right
   and mask, return the number of bits to shift right.

   The outer value of the subreg has OUTER_BYTES bytes and starts at
   byte offset SUBREG_BYTE within an inner value of INNER_BYTES bytes.  */

poly_uint64
subreg_size_lsb (poly_uint64 outer_bytes,
		 poly_uint64 inner_bytes,
		 poly_uint64 subreg_byte)
{
  poly_uint64 subreg_end, trailing_bytes, byte_pos;

  /* A paradoxical subreg begins at bit position 0.  */
  gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
  if (maybe_gt (outer_bytes, inner_bytes))
    {
      gcc_checking_assert (known_eq (subreg_byte, 0U));
      return 0;
    }

  subreg_end = subreg_byte + outer_bytes;
  trailing_bytes = inner_bytes - subreg_end;
  if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
    byte_pos = trailing_bytes;
  else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
    byte_pos = subreg_byte;
  else
    {
      /* When bytes and words have opposite endianness, we must be able
	 to split offsets into words and bytes at compile time.  */
      poly_uint64 leading_word_part
	= force_align_down (subreg_byte, UNITS_PER_WORD);
      poly_uint64 trailing_word_part
	= force_align_down (trailing_bytes, UNITS_PER_WORD);
      /* If the subreg crosses a word boundary ensure that
	 it also begins and ends on a word boundary.  */
      gcc_assert (known_le (subreg_end - leading_word_part,
			    (unsigned int) UNITS_PER_WORD)
		  || (known_eq (leading_word_part, subreg_byte)
		      && known_eq (trailing_word_part, trailing_bytes)));
      if (WORDS_BIG_ENDIAN)
	byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
      else
	byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
    }

  return byte_pos * BITS_PER_UNIT;
}
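/* For example, on a little-endian target (!WORDS_BIG_ENDIAN,
   !BYTES_BIG_ENDIAN), (subreg:SI (reg:DI R) 4) starts at byte 4 of R,
   so subreg_size_lsb (4, 8, 4) is 4 * BITS_PER_UNIT == 32: extracting
   the SImode value is equivalent to shifting R right by 32 bits.  On a
   fully big-endian target the same subreg has trailing_bytes == 0 and
   thus lsb 0.  */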
/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

poly_uint64
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
		       SUBREG_BYTE (x));
}
/* Return the subreg byte offset for a subreg whose outer value has
   OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
   there are LSB_SHIFT *bits* between the lsb of the outer value and the
   lsb of the inner value.  This is the inverse of the calculation
   performed by subreg_lsb_1 (which converts byte offsets to bit shifts).  */

poly_uint64
subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
			     poly_uint64 lsb_shift)
{
  /* A paradoxical subreg begins at bit position 0.  */
  gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
  if (maybe_gt (outer_bytes, inner_bytes))
    {
      gcc_checking_assert (known_eq (lsb_shift, 0U));
      return 0;
    }

  poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
  poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
  if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
    return upper_bytes;
  else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
    return lower_bytes;
  else
    {
      /* When bytes and words have opposite endianness, we must be able
	 to split offsets into words and bytes at compile time.  */
      poly_uint64 lower_word_part = force_align_down (lower_bytes,
						      UNITS_PER_WORD);
      poly_uint64 upper_word_part = force_align_down (upper_bytes,
						      UNITS_PER_WORD);
      if (WORDS_BIG_ENDIAN)
	return upper_word_part + (lower_bytes - lower_word_part);
      else
	return lower_word_part + (upper_bytes - upper_word_part);
    }
}
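/* Continuing the example above: on a little-endian target,
   subreg_size_offset_from_lsb (4, 8, 32) has lower_bytes == 4 and
   recovers the byte offset 4, as expected for the inverse of
   subreg_size_lsb (4, 8, 4) == 32.  */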
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.

   Rather than considering one particular inner register (and thus one
   particular "outer" register) in isolation, this function really uses
   XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
   function does not check whether adding INFO->offset to XREGNO gives
   a valid hard register; even if INFO->offset + XREGNO is out of range,
   there might be another register of the same type that is in range.
   Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
   the new register, since that can depend on things like whether the final
   register number is even or odd.  Callers that want to check whether
   this particular subreg can be replaced by a simple (reg ...) should
   use simplify_subreg_regno.  */

void
subreg_get_info (unsigned int xregno, machine_mode xmode,
		 poly_uint64 offset, machine_mode ymode,
		 struct subreg_info *info)
{
  unsigned int nregs_xmode, nregs_ymode;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  poly_uint64 xsize = GET_MODE_SIZE (xmode);
  poly_uint64 ysize = GET_MODE_SIZE (ymode);

  bool rknown = false;

  /* If the register representation of a non-scalar mode has holes in it,
     we expect the scalar units to be concatenated together, with the holes
     distributed evenly among the scalar units.  Each scalar unit must occupy
     at least one register.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      /* As a consequence, we must be dealing with a constant number of
	 scalars, and thus a constant offset and number of units.  */
      HOST_WIDE_INT coffset = offset.to_constant ();
      HOST_WIDE_INT cysize = ysize.to_constant ();
      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
      scalar_mode xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
		  == (nunits
		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs (xregno, xmode)
		  == hard_regno_nregs (xregno, xmode_unit) * nunits);

      /* You can only ask for a SUBREG of a value with holes in the middle
	 if you don't cross the holes.  (Such a SUBREG should be done by
	 picking a different register class, or doing it in memory if
	 necessary.)  An example of a value with holes is XCmode on 32-bit
	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
	 3 for each part, but in memory it's two 128-bit parts.
	 Padding is assumed to be at the end (not necessarily the 'high part')
	 of each unit.  */
      if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
	  && (coffset / GET_MODE_SIZE (xmode_unit)
	      != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
	{
	  info->representable_p = false;
	  rknown = true;
	}
    }
  else
    nregs_xmode = hard_regno_nregs (xregno, xmode);

  nregs_ymode = hard_regno_nregs (xregno, ymode);

  /* Subreg sizes must be ordered, so that we can tell whether they are
     partial, paradoxical or complete.  */
  gcc_checking_assert (ordered_p (xsize, ysize));

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
	 actual hard registers than the original register, we must
	 return a negative offset so that we find the proper highpart
	 of the register.

	 We assume that the ordering of registers within a multi-register
	 value has a consistent endianness: if bytes and register words
	 have different endianness, the hard registers that make up a
	 multi-register value must be at least word-sized.  */
      if (REG_WORDS_BIG_ENDIAN)
	info->offset = (int) nregs_xmode - (int) nregs_ymode;
      else
	info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  poly_uint64 regsize_xmode, regsize_ymode;
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && multiple_p (xsize, nregs_xmode, &regsize_xmode)
      && multiple_p (ysize, nregs_ymode, &regsize_ymode))
    {
      if (!rknown
	  && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
	      || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
	{
	  info->representable_p = false;
	  if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
	      || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
	    /* Checked by validate_subreg.  We must know at compile time
	       which inner registers are being accessed.  */
	    gcc_unreachable ();
	  return;
	}
      /* It's not valid to extract a subreg of mode YMODE at OFFSET that
	 would go outside of XMODE.  */
      if (!rknown && maybe_gt (ysize + offset, xsize))
	{
	  info->representable_p = false;
	  info->nregs = nregs_ymode;
	  if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
	    /* Checked by validate_subreg.  We must know at compile time
	       which inner registers are being accessed.  */
	    gcc_unreachable ();
	  return;
	}
      /* Quick exit for the simple and common case of extracting whole
	 subregisters from a multiregister value.  */
      /* ??? It would be better to integrate this into the code below,
	 if we can generalize the concept enough and figure out how
	 odd-sized modes can coexist with the other weird cases we support.  */
      HOST_WIDE_INT count;
      if (!rknown
	  && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
	  && known_eq (regsize_xmode, regsize_ymode)
	  && constant_multiple_p (offset, regsize_ymode, &count))
	{
	  info->representable_p = true;
	  info->nregs = nregs_ymode;
	  info->offset = count;
	  gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
	  return;
	}
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
    {
      info->representable_p = true;
      rknown = true;

      if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
	{
	  info->offset = 0;
	  info->nregs = nregs_ymode;
	  return;
	}
    }

  /* Set NUM_BLOCKS to the number of independently-representable YMODE
     values there are in (reg:XMODE XREGNO).  We can view the register
     as consisting of this number of independent "blocks", where each
     block occupies NREGS_YMODE registers and contains exactly one
     representable YMODE value.  */
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);
  unsigned int num_blocks = nregs_xmode / nregs_ymode;

  /* Calculate the number of bytes in each block.  This must always
     be exact, otherwise we don't know how to verify the constraint.
     These conditions may be relaxed but subreg_regno_offset would
     need to be redesigned.  */
  poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);

  /* Get the number of the first block that contains the subreg and the byte
     offset of the subreg from the start of that block.  */
  unsigned int block_number;
  poly_uint64 subblock_offset;
  if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
			&subblock_offset))
    /* Checked by validate_subreg.  We must know at compile time which
       inner registers are being accessed.  */
    gcc_unreachable ();

  if (!rknown)
    {
      /* Only the lowpart of each block is representable.  */
      info->representable_p
	= known_eq (subblock_offset,
		    subreg_size_lowpart_offset (ysize, bytes_per_block));
      rknown = true;
    }

  /* We assume that the ordering of registers within a multi-register
     value has a consistent endianness: if bytes and register words
     have different endianness, the hard registers that make up a
     multi-register value must be at least word-sized.  */
  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
    /* The block number we calculated above followed memory endianness.
       Convert it to register endianness by counting back from the end.
       (Note that, because of the assumption above, each block must be
       at least word-sized.)  */
    info->offset = (num_blocks - block_number - 1) * nregs_ymode;
  else
    info->offset = block_number * nregs_ymode;
  info->nregs = nregs_ymode;
}
/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */
unsigned int
subreg_regno_offset (unsigned int xregno, machine_mode xmode,
		     poly_uint64 offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}
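/* A hypothetical worked example (assuming word-sized hard registers and a
   little-endian register layout): on a 32-bit target where (reg:DI 10)
   occupies hard registers 10 and 11, subreg_regno_offset (10, DImode, 4,
   SImode) is 1, because (subreg:SI (reg:DI 10) 4) refers to the second
   register of the pair, i.e. register 11.  */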
/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */
bool
subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
			       poly_uint64 offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
		       poly_uint64 offset, machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode)
      /* We can use mode change in LRA for some transformations.  */
      && ! lra_in_progress)
    return -1;

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM
      /* We should convert hard stack register in LRA if it is
	 necessary.  */
      && ! lra_in_progress)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!targetm.hard_regno_mode_ok (yregno, ymode)
      && targetm.hard_regno_mode_ok (xregno, xmode))
    return -1;

  return (int) yregno;
}
/* Return the final regno that a subreg expression refers to.  */
unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
				     GET_MODE (subreg),
				     SUBREG_BYTE (x),
				     GET_MODE (x));
  return ret;
}

/* Return the number of registers that a subreg expression refers
   to.  */
unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}
/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
		   &info);
  return info.nregs;
}
struct parms_set_data
{
  HARD_REG_SET regs;
  int nregs;
};

/* Helper function for noticing stores to parameter registers.  */
static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}
/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */
rtx_insn *
find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
{
  struct parms_set_data parm;
  rtx p;
  rtx_insn *before, *first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
	&& REG_P (XEXP (XEXP (p, 0), 0))
	&& !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
      {
	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

	/* We only care about registers which can hold function
	   arguments.  */
	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
	  continue;

	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
	parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
	 another.  Stop in that case.  */
      if (CALL_P (before))
	break;

      /* Our caller needs to either ensure that we will find all sets
	 (in case code has not been optimized yet), or take care
	 of possible labels by setting the boundary to the preceding
	 CODE_LABEL.  */
      if (LABEL_P (before))
	{
	  gcc_assert (before == boundary);
	  break;
	}

      if (INSN_P (before))
	{
	  int nregs_old = parm.nregs;
	  note_stores (before, parms_set, &parm);
	  /* If we found something that did not set a parameter reg,
	     we're done.  Do not keep going, as that might result
	     in hoisting an insn before the setting of a pseudo
	     that is used by the hoisted insn.  */
	  if (nregs_old != parm.nregs)
	    first_set = before;
	  else
	    break;
	}
    }
  return first_set;
}
/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const rtx_insn *insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
	  && fixed_regs[REGNO (SET_DEST (set))]
	  && general_operand (SET_SRC (set), VOIDmode))
	return true;
      if (REG_P (SET_SRC (set))
	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
	  && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	return true;
      /* There may be a stack pop just after the call and before the store
	 of the return register.  Search for the actual store when deciding
	 if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
	{
	  /* This CONST_CAST is okay because next_nonnote_insn just
	     returns its argument and we assign it to a const_rtx
	     variable.  */
	  const rtx_insn *i2
	    = next_nonnote_insn (const_cast<rtx_insn *> (insn));
	  if (i2 && keep_with_call_p (i2))
	    return true;
	}
    }
  return false;
}
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);
  rtx_jump_table_data *table;

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
	  return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
	  int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  if (GET_MODE (x) != VOIDmode)
    mode = GET_MODE (x);

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
  if (factor == 0)
    factor = 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
	 number of units (translated from digits) when using
	 schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    case SET:
      /* A SET doesn't have a mode, so let's look at the SET_DEST to get
	 the mode for the factor.  */
      mode = GET_MODE (SET_DEST (x));
      factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;
      /* FALLTHRU */
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
	 the mode, the more expensive it is.  */
      if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
	return COSTS_N_INSNS (2 + factor);
      break;

    case TRUNCATE:
      if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
	{
	  total = 0;
	  break;
	}
      /* FALLTHRU */
    default:
      if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
	return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), mode, code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);

  return total;
}
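/* An illustrative use (a sketch, not from the original sources): compare
   two candidate expressions and keep the cheaper one, e.g.

       if (rtx_cost (a, mode, SET, 1, optimize_insn_for_speed_p ())
	   < rtx_cost (b, mode, SET, 1, optimize_insn_for_speed_p ()))
	 ...

   Callers more commonly go through wrappers such as set_src_cost.  */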
/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
		   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, mode, outer, opno, true);
  c->size = rtx_cost (x, mode, outer, opno, false);
}
/* Return cost of address expression X.
   Expect that X is a properly formed address reference.

   The SPEED parameter specifies whether costs optimized for speed or size
   should be returned.  */

int
address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}
/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, Pmode, MEM, 0, speed);
}
unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, machine_mode mode)
{
  if (mode == VOIDmode)
    mode = GET_MODE (x);
  scalar_int_mode int_mode;
  if (!is_a <scalar_int_mode> (mode, &int_mode))
    return GET_MODE_MASK (mode);
  return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
}
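/* For example, nonzero_bits on (and:SI (reg:SI R) (const_int 255)) is at
   most 0xff: the AND case of nonzero_bits1 below intersects the nonzero
   bits of both operands, and the CONST_INT operand contributes 0xff.  */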
unsigned int
num_sign_bit_copies (const_rtx x, machine_mode mode)
{
  if (mode == VOIDmode)
    mode = GET_MODE (x);
  scalar_int_mode int_mode;
  if (!is_a <scalar_int_mode> (mode, &int_mode))
    return 1;
  return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
}
/* Return true if nonzero_bits1 might recurse into both operands
   of X.  */

static inline bool
nonzero_bits_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case AND:
    case XOR:
    case IOR:
    case UMIN:
    case UMAX:
    case SMIN:
    case SMAX:
    case PLUS:
    case MINUS:
    case MULT:
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      return true;
    default:
      return false;
    }
}
/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
		     machine_mode known_mode,
		     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (nonzero_bits_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));

      /* Check the second level.  */
      if (nonzero_bits_binary_arith_p (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return nonzero_bits1 (x, mode, x1, mode,
			      cached_nonzero_bits (x1, mode, known_x,
						   known_mode, known_ret));

      if (nonzero_bits_binary_arith_p (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}
/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4501 /* Given an expression, X, compute which bits in X can be nonzero.
4502 We don't care about bits outside of those defined in MODE.
4504 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4505 an arithmetic operation, we can do better. */
4507 static unsigned HOST_WIDE_INT
4508 nonzero_bits1 (const_rtx x
, scalar_int_mode mode
, const_rtx known_x
,
4509 machine_mode known_mode
,
4510 unsigned HOST_WIDE_INT known_ret
)
4512 unsigned HOST_WIDE_INT nonzero
= GET_MODE_MASK (mode
);
4513 unsigned HOST_WIDE_INT inner_nz
;
4514 enum rtx_code code
= GET_CODE (x
);
4515 machine_mode inner_mode
;
4516 unsigned int inner_width
;
4517 scalar_int_mode xmode
;
4519 unsigned int mode_width
= GET_MODE_PRECISION (mode
);
4521 if (CONST_INT_P (x
))
4523 if (SHORT_IMMEDIATES_SIGN_EXTEND
4525 && mode_width
< BITS_PER_WORD
4526 && (UINTVAL (x
) & (HOST_WIDE_INT_1U
<< (mode_width
- 1))) != 0)
4527 return UINTVAL (x
) | (HOST_WIDE_INT_M1U
<< mode_width
);
4532 if (!is_a
<scalar_int_mode
> (GET_MODE (x
), &xmode
))
4534 unsigned int xmode_width
= GET_MODE_PRECISION (xmode
);
4536 /* If X is wider than MODE, use its mode instead. */
4537 if (xmode_width
> mode_width
)
4540 nonzero
= GET_MODE_MASK (mode
);
4541 mode_width
= xmode_width
;
4544 if (mode_width
> HOST_BITS_PER_WIDE_INT
)
4545 /* Our only callers in this case look for single bit values. So
4546 just return the mode mask. Those tests will then be false. */
4549 /* If MODE is wider than X, but both are a single word for both the host
4550 and target machines, we can compute this from which bits of the object
4551 might be nonzero in its own mode, taking into account the fact that, on
4552 CISC machines, accessing an object in a wider mode generally causes the
4553 high-order bits to become undefined, so they are not known to be zero.
4554 We extend this reasoning to RISC machines for operations that might not
4555 operate on the full registers. */
4556 if (mode_width
> xmode_width
4557 && xmode_width
<= BITS_PER_WORD
4558 && xmode_width
<= HOST_BITS_PER_WIDE_INT
4559 && !(WORD_REGISTER_OPERATIONS
&& word_register_operation_p (x
)))
4561 nonzero
&= cached_nonzero_bits (x
, xmode
,
4562 known_x
, known_mode
, known_ret
);
4563 nonzero
|= GET_MODE_MASK (mode
) & ~GET_MODE_MASK (xmode
);
4567 /* Please keep nonzero_bits_binary_arith_p above in sync with
4568 the code in the switch below. */
4572 #if defined(POINTERS_EXTEND_UNSIGNED)
4573 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4574 all the bits above ptr_mode are known to be zero. */
4575 /* As we do not know which address space the pointer is referring to,
4576 we can do this only if the target does not support different pointer
4577 or address modes depending on the address space. */
4578 if (target_default_pointer_address_modes_p ()
4579 && POINTERS_EXTEND_UNSIGNED
4582 && !targetm
.have_ptr_extend ())
4583 nonzero
&= GET_MODE_MASK (ptr_mode
);
4586 /* Include declared information about alignment of pointers. */
4587 /* ??? We don't properly preserve REG_POINTER changes across
4588 pointer-to-integer casts, so we can't trust it except for
4589 things that we know must be pointers. See execute/960116-1.c. */
4590 if ((x
== stack_pointer_rtx
4591 || x
== frame_pointer_rtx
4592 || x
== arg_pointer_rtx
)
4593 && REGNO_POINTER_ALIGN (REGNO (x
)))
4595 unsigned HOST_WIDE_INT alignment
4596 = REGNO_POINTER_ALIGN (REGNO (x
)) / BITS_PER_UNIT
;
4598 #ifdef PUSH_ROUNDING
4599 /* If PUSH_ROUNDING is defined, it is possible for the
4600 stack to be momentarily aligned only to that amount,
4601 so we pick the least alignment. */
4602 if (x
== stack_pointer_rtx
&& PUSH_ARGS
)
4604 poly_uint64 rounded_1
= PUSH_ROUNDING (poly_int64 (1));
4605 alignment
= MIN (known_alignment (rounded_1
), alignment
);
4609 nonzero
&= ~(alignment
- 1);
4613 unsigned HOST_WIDE_INT nonzero_for_hook
= nonzero
;
4614 rtx new_rtx
= rtl_hooks
.reg_nonzero_bits (x
, xmode
, mode
,
4618 nonzero_for_hook
&= cached_nonzero_bits (new_rtx
, mode
, known_x
,
4619 known_mode
, known_ret
);
4621 return nonzero_for_hook
;
4625 /* In many, if not most, RISC machines, reading a byte from memory
4626 zeros the rest of the register. Noticing that fact saves a lot
4627 of extra zero-extends. */
4628 if (load_extend_op (xmode
) == ZERO_EXTEND
)
4629 nonzero
&= GET_MODE_MASK (xmode
);
4633 case UNEQ
: case LTGT
:
4634 case GT
: case GTU
: case UNGT
:
4635 case LT
: case LTU
: case UNLT
:
4636 case GE
: case GEU
: case UNGE
:
4637 case LE
: case LEU
: case UNLE
:
4638 case UNORDERED
: case ORDERED
:
4639 /* If this produces an integer result, we know which bits are set.
4640 Code here used to clear bits outside the mode of X, but that is
4642 /* Mind that MODE is the mode the caller wants to look at this
4643 operation in, and not the actual operation mode. We can wind
4644 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4645 that describes the results of a vector compare. */
4646 if (GET_MODE_CLASS (xmode
) == MODE_INT
4647 && mode_width
<= HOST_BITS_PER_WIDE_INT
)
4648 nonzero
= STORE_FLAG_VALUE
;
4653 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4654 and num_sign_bit_copies. */
4655 if (num_sign_bit_copies (XEXP (x
, 0), xmode
) == xmode_width
)
4659 if (xmode_width
< mode_width
)
4660 nonzero
|= (GET_MODE_MASK (mode
) & ~GET_MODE_MASK (xmode
));
4665 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4666 and num_sign_bit_copies. */
4667 if (num_sign_bit_copies (XEXP (x
, 0), xmode
) == xmode_width
)
4673 nonzero
&= (cached_nonzero_bits (XEXP (x
, 0), mode
,
4674 known_x
, known_mode
, known_ret
)
4675 & GET_MODE_MASK (mode
));
4679 nonzero
&= cached_nonzero_bits (XEXP (x
, 0), mode
,
4680 known_x
, known_mode
, known_ret
);
4681 if (GET_MODE (XEXP (x
, 0)) != VOIDmode
)
4682 nonzero
&= GET_MODE_MASK (GET_MODE (XEXP (x
, 0)));
4686 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4687 Otherwise, show all the bits in the outer mode but not the inner
4689 inner_nz
= cached_nonzero_bits (XEXP (x
, 0), mode
,
4690 known_x
, known_mode
, known_ret
);
4691 if (GET_MODE (XEXP (x
, 0)) != VOIDmode
)
4693 inner_nz
&= GET_MODE_MASK (GET_MODE (XEXP (x
, 0)));
4694 if (val_signbit_known_set_p (GET_MODE (XEXP (x
, 0)), inner_nz
))
4695 inner_nz
|= (GET_MODE_MASK (mode
)
4696 & ~GET_MODE_MASK (GET_MODE (XEXP (x
, 0))));
4699 nonzero
&= inner_nz
;
4703 nonzero
&= cached_nonzero_bits (XEXP (x
, 0), mode
,
4704 known_x
, known_mode
, known_ret
)
4705 & cached_nonzero_bits (XEXP (x
, 1), mode
,
4706 known_x
, known_mode
, known_ret
);
4710 case UMIN
: case UMAX
: case SMIN
: case SMAX
:
4712 unsigned HOST_WIDE_INT nonzero0
4713 = cached_nonzero_bits (XEXP (x
, 0), mode
,
4714 known_x
, known_mode
, known_ret
);
4716 /* Don't call nonzero_bits for the second time if it cannot change
4718 if ((nonzero
& nonzero0
) != nonzero
)
4720 | cached_nonzero_bits (XEXP (x
, 1), mode
,
4721 known_x
, known_mode
, known_ret
);
4725 case PLUS
: case MINUS
:
4727 case DIV
: case UDIV
:
4728 case MOD
: case UMOD
:
4729 /* We can apply the rules of arithmetic to compute the number of
4730 high- and low-order zero bits of these operations. We start by
4731 computing the width (position of the highest-order nonzero bit)
4732 and the number of low-order zero bits for each value. */
4734 unsigned HOST_WIDE_INT nz0
4735 = cached_nonzero_bits (XEXP (x
, 0), mode
,
4736 known_x
, known_mode
, known_ret
);
4737 unsigned HOST_WIDE_INT nz1
4738 = cached_nonzero_bits (XEXP (x
, 1), mode
,
4739 known_x
, known_mode
, known_ret
);
4740 int sign_index
= xmode_width
- 1;
4741 int width0
= floor_log2 (nz0
) + 1;
4742 int width1
= floor_log2 (nz1
) + 1;
4743 int low0
= ctz_or_zero (nz0
);
4744 int low1
= ctz_or_zero (nz1
);
4745 unsigned HOST_WIDE_INT op0_maybe_minusp
4746 = nz0
& (HOST_WIDE_INT_1U
<< sign_index
);
4747 unsigned HOST_WIDE_INT op1_maybe_minusp
4748 = nz1
& (HOST_WIDE_INT_1U
<< sign_index
);
4749 unsigned int result_width
= mode_width
;
4755 result_width
= MAX (width0
, width1
) + 1;
4756 result_low
= MIN (low0
, low1
);
4759 result_low
= MIN (low0
, low1
);
4762 result_width
= width0
+ width1
;
4763 result_low
= low0
+ low1
;
4768 if (!op0_maybe_minusp
&& !op1_maybe_minusp
)
4769 result_width
= width0
;
4774 result_width
= width0
;
4779 if (!op0_maybe_minusp
&& !op1_maybe_minusp
)
4780 result_width
= MIN (width0
, width1
);
4781 result_low
= MIN (low0
, low1
);
4786 result_width
= MIN (width0
, width1
);
4787 result_low
= MIN (low0
, low1
);
4793 if (result_width
< mode_width
)
4794 nonzero
&= (HOST_WIDE_INT_1U
<< result_width
) - 1;
4797 nonzero
&= ~((HOST_WIDE_INT_1U
<< result_low
) - 1);
4802 if (CONST_INT_P (XEXP (x
, 1))
4803 && INTVAL (XEXP (x
, 1)) < HOST_BITS_PER_WIDE_INT
)
4804 nonzero
&= (HOST_WIDE_INT_1U
<< INTVAL (XEXP (x
, 1))) - 1;
4808 /* If this is a SUBREG formed for a promoted variable that has
4809 been zero-extended, we know that at least the high-order bits
4810 are zero, though others might be too. */
4811 if (SUBREG_PROMOTED_VAR_P (x
) && SUBREG_PROMOTED_UNSIGNED_P (x
))
4812 nonzero
= GET_MODE_MASK (xmode
)
4813 & cached_nonzero_bits (SUBREG_REG (x
), xmode
,
4814 known_x
, known_mode
, known_ret
);
4816 /* If the inner mode is a single word for both the host and target
4817 machines, we can compute this from which bits of the inner
4818 object might be nonzero. */
4819 inner_mode
= GET_MODE (SUBREG_REG (x
));
4820 if (GET_MODE_PRECISION (inner_mode
).is_constant (&inner_width
)
4821 && inner_width
<= BITS_PER_WORD
4822 && inner_width
<= HOST_BITS_PER_WIDE_INT
)
4824 nonzero
&= cached_nonzero_bits (SUBREG_REG (x
), mode
,
4825 known_x
, known_mode
, known_ret
);
4827 /* On a typical CISC machine, accessing an object in a wider mode
4828 causes the high-order bits to become undefined. So they are
4829 not known to be zero.
4831 On a typical RISC machine, we only have to worry about the way
4832 loads are extended. Otherwise, if we get a reload for the inner
4833 part, it may be loaded from the stack, and then we may lose all
4834 the zero bits that existed before the store to the stack. */
4836 if ((!WORD_REGISTER_OPERATIONS
4837 || ((extend_op
= load_extend_op (inner_mode
)) == SIGN_EXTEND
4838 ? val_signbit_known_set_p (inner_mode
, nonzero
)
4839 : extend_op
!= ZERO_EXTEND
)
4840 || !MEM_P (SUBREG_REG (x
)))
4841 && xmode_width
> inner_width
)
4843 |= (GET_MODE_MASK (GET_MODE (x
)) & ~GET_MODE_MASK (inner_mode
));
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* The nonzero bits are in two classes: any bits within MODE
	 that aren't in xmode are always significant.  The rest of the
	 nonzero bits are those that are significant in the operand of
	 the shift when shifted the appropriate number of bits.  This
	 shows that high-order bits are cleared by the right shift and
	 low-order bits by left shifts.  */
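      /* For example, for (lshiftrt:SI X (const_int 4)) where X has
	 nonzero bits 0xff00, the operand's significant bits move down
	 four places and the result's nonzero bits are 0x0ff0.  */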
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) < xmode_width)
	{
	  int count = INTVAL (XEXP (x, 1));
	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
	  unsigned HOST_WIDE_INT op_nonzero
	    = cached_nonzero_bits (XEXP (x, 0), mode,
				   known_x, known_mode, known_ret);
	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
	  unsigned HOST_WIDE_INT outer = 0;

	  if (mode_width > xmode_width)
	    outer = (op_nonzero & nonzero & ~mode_mask);

	  switch (code)
	    {
	    case ASHIFT:
	      inner <<= count;
	      break;

	    case LSHIFTRT:
	      inner >>= count;
	      break;

	    case ASHIFTRT:
	      inner >>= count;

	      /* If the sign bit may have been nonzero before the shift, we
		 need to mark all the places it could have been copied to
		 by the shift as possibly nonzero.  */
	      if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
		inner |= (((HOST_WIDE_INT_1U << count) - 1)
			  << (xmode_width - count));
	      break;

	    case ROTATE:
	      inner = (inner << (count % xmode_width)
		       | (inner >> (xmode_width - (count % xmode_width))))
		      & mode_mask;
	      break;

	    case ROTATERT:
	      inner = (inner >> (count % xmode_width)
		       | (inner << (xmode_width - (count % xmode_width))))
		      & mode_mask;
	      break;

	    default:
	      gcc_unreachable ();
	    }

	  nonzero &= (outer | inner);
	}
      break;
    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
	unsigned HOST_WIDE_INT nonzero_true
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero_true) != nonzero)
	  nonzero &= nonzero_true
		     | cached_nonzero_bits (XEXP (x, 2), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}
/* See the macro definition above.  */
#undef cached_num_sign_bit_copies


/* Return true if num_sign_bit_copies1 might recurse into both operands
   of X.  */

static inline bool
num_sign_bit_copies_binary_arith_p (const_rtx x)
{
  if (!ARITHMETIC_P (x))
    return false;
  switch (GET_CODE (x))
    {
    case IOR:
    case AND:
    case XOR:
    case SMIN:
    case SMAX:
    case UMIN:
    case UMAX:
    case PLUS:
    case MINUS:
    case MULT:
      return true;
    default:
      return false;
    }
}
/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */
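/* For example, when X is (plus (reg A) (reg A)) with both operands the
   same shared rtx, the value for (reg A) is computed once and passed
   down as KNOWN_X/KNOWN_RET, so num_sign_bit_copies1 does not recompute
   it for each operand of the PLUS.  */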
static unsigned int
cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
			    const_rtx known_x, machine_mode known_mode,
			    unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (num_sign_bit_copies_binary_arith_p (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));

      /* Check the second level.  */
      if (num_sign_bit_copies_binary_arith_p (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x1, mode,
				cached_num_sign_bit_copies (x1, mode, known_x,
							    known_mode,
							    known_ret));

      if (num_sign_bit_copies_binary_arith_p (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE.  The returned
   value will always be between 1 and the number of bits in MODE.  */

static unsigned int
num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
		      machine_mode known_mode,
		      unsigned int known_ret)
{
  enum rtx_code code = GET_CODE (x);
  unsigned int bitwidth = GET_MODE_PRECISION (mode);
  int num0, num1, result;
  unsigned HOST_WIDE_INT nonzero;

  if (CONST_INT_P (x))
    {
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
    }
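  /* For example, in SImode (const_int -4) is 0xfffffffc; its complement
     under the mode mask is 3, floor_log2 (3) is 1, and the result is
     32 - 1 - 1 == 30: bits 31 down to 2 all equal the sign bit.  */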
  scalar_int_mode xmode, inner_mode;
  if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
    return 1;

  unsigned int xmode_width = GET_MODE_PRECISION (xmode);

  /* For a smaller mode, just ignore the high bits.  */
  if (bitwidth < xmode_width)
    {
      num0 = cached_num_sign_bit_copies (x, xmode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - (int) (xmode_width - bitwidth));
    }

  if (bitwidth > xmode_width)
    {
      /* If this machine does not do all register operations on the entire
	 register and MODE is wider than the mode of X, we can say nothing
	 at all about the high-order bits.  We extend this reasoning to RISC
	 machines for operations that might not operate on full registers.  */
      if (!(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
	return 1;

      /* Likewise on machines that do, if the mode of the object is smaller
	 than a word and loads of that size don't sign extend, we can say
	 nothing about the high order bits.  */
      if (xmode_width < BITS_PER_WORD
	  && load_extend_op (xmode) != SIGN_EXTEND)
	return 1;
    }

  /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
     the code in the switch below.  */
  switch (code)
    {
    case REG:

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend signed and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
	  && mode == Pmode && REG_POINTER (x)
	  && !targetm.have_ptr_extend ())
	return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
#endif

      {
	unsigned int copies_for_hook = 1, copies = 1;
	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
							 &copies_for_hook);

	if (new_rtx)
	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
					       known_mode, known_ret);

	if (copies > 1 || copies_for_hook > 1)
	  return MAX (copies, copies_for_hook);

	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;
    case MEM:
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (load_extend_op (xmode) == SIGN_EXTEND)
	return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
      break;
    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
	 and we are looking at it in a wider mode, we know that at least the
	 high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					     known_x, known_mode, known_ret);
	  return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
	}

      if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
	{
	  /* For a smaller object, just ignore the high bits.  */
	  if (bitwidth <= GET_MODE_PRECISION (inner_mode))
	    {
	      num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
						 known_x, known_mode,
						 known_ret);
	      return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
					   - bitwidth));
	    }

	  /* For paradoxical SUBREGs on machines where all register operations
	     affect the entire register, just look inside.  Note that we are
	     passing MODE to the recursive call, so the number of sign bit
	     copies will remain relative to that mode, not the inner mode.

	     This works only if loads sign extend.  Otherwise, if we get a
	     reload for the inner part, it may be loaded from the stack, and
	     then we lose all sign bit copies that existed before the store
	     to the stack.  */
	  if (WORD_REGISTER_OPERATIONS
	      && load_extend_op (inner_mode) == SIGN_EXTEND
	      && paradoxical_subreg_p (x)
	      && MEM_P (SUBREG_REG (x)))
	    return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					       known_x, known_mode, known_ret);
	}
      break;
    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;
    case SIGN_EXTEND:
      if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return (bitwidth - GET_MODE_PRECISION (inner_mode)
		+ cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
					      known_x, known_mode, known_ret));
      break;
    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
					 known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
				    - bitwidth)));
    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
    case ROTATE:  case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
	 of sign bit copies, we can just subtract that amount from the
	 number.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
	{
	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					     known_x, known_mode, known_ret);
	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
	}
      break;
    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
	 is known to be positive, the number of sign bit copies is the
	 same as that of the input.  Finally, if the input has just one bit
	 that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
	return bitwidth;

      if (num0 > 1
	  && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
	num0--;

      return num0;
    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
	 MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
	 the other term, we are guaranteed to have at least that many
	 high-order zero bits.  */
      if (code == AND
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
	return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return num1;

      return MIN (num0, num1);
    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
	 if we are subtracting 1 from a positive number, there will not
	 be such a carry.  Furthermore, if the positive number is known to
	 be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero = nonzero_bits (XEXP (x, 0), mode);
	  if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
	    return (nonzero == 1 || nonzero == 0 ? bitwidth
		    : bitwidth - floor_log2 (nonzero) - 1);
	}

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

      return result;
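      /* For example, if both operands of a PLUS have at least 8 sign bit
	 copies in a 32-bit mode, each lies in [-2^24, 2^24 - 1], so the
	 sum fits in 26 bits and retains at least MIN (8, 8) - 1 == 7
	 sign bit copies.  */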
    case MULT:
      /* The number of bits of the product is the sum of the number of
	 bits of both terms.  However, unless one of the terms is known
	 to be positive, we must allow for an additional bit since negating
	 a negative number can remove one sign bit copy.  */

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (((nonzero_bits (XEXP (x, 0), mode)
		    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
		  && ((nonzero_bits (XEXP (x, 1), mode)
		       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
		      != 0))))
	result--;

      return MAX (1, result);
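      /* For example, in a 32-bit mode two operands with 20 sign bit
	 copies each fit in 13 signed bits, so the formula above gives
	 20 + 20 - 32 == 8 copies for the product; one more is subtracted
	 when both operands might be negative, leaving 7.  */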
    case UDIV:
      /* The result must be <= the first operand.  If the first operand
	 has the high bit set, we know nothing about the number of sign
	 bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
    case UMOD:
      /* The result must be <= the second operand.  If the second operand
	 has (or just might have) the high bit set, we know nothing about
	 the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;
    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < xmode_width)
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;
    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= xmode_width)
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));
    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);
    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    default:
      break;
    }
  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
/* Calculate the rtx_cost of a single instruction pattern.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
pattern_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.  We
     can't use single_set since we only have the pattern.  We also
     consider PARALLELs of a normal set and a single comparison.  In
     that case we use the cost of the non-comparison SET operation,
     which is most-likely to be the real cost of this operation.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      rtx comparison = NULL_RTX;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (GET_CODE (SET_SRC (x)) == COMPARE)
		{
		  if (comparison)
		    return 0;
		  comparison = x;
		}
	      else
		{
		  if (set)
		    return 0;
		  set = x;
		}
	    }
	}

      if (!set && comparison)
	set = comparison;

      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
/* Calculate the cost of a single instruction.  A return value of zero
   indicates an instruction pattern without a known cost.  */

int
insn_cost (rtx_insn *insn, bool speed)
{
  if (targetm.insn_cost)
    return targetm.insn_cost (insn, speed);

  return pattern_cost (PATTERN (insn), speed);
}
/* Returns estimate on cost of computing SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
	cost += set_rtx_cost (set, speed);
      else if (NONDEBUG_INSN_P (seq))
	{
	  int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
	  if (this_cost > 0)
	    cost += this_cost;
	  else
	    cost++;
	}
    }

  return cost;
}
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonizing it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */
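/* For example, under rule (4) the condition (le (reg:SI 100) (const_int 4))
   is canonicalized to (lt (reg:SI 100) (const_int 5)), and under rule (3)
   (gt (const_int 4) (reg:SI 100)) is first swapped to
   (lt (reg:SI 100) (const_int 4)).  */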
rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
			rtx_insn **earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;

      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */
	  if (((GET_MODE_CLASS (mode) == MODE_CC)
	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
	      && mode != VOIDmode
	      && inner_mode != VOIDmode)
	    break;

	  if (GET_CODE (SET_SRC (set)) == COMPARE
	      || (((code == NE
		    || (code == LT
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == LT
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    ))
		  && COMPARISON_P (SET_SRC (set))))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set)))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else if ((code == EQ || code == NE)
		   && GET_CODE (SET_SRC (set)) == XOR)
	    /* Handle sequences like:

	       (set op0 (xor X Y))
	       ...(eq|ne op0 (const_int 0))...

	       in which case:

	       (eq op0 (const_int 0)) reduces to (eq X Y)
	       (ne op0 (const_int 0)) reduces to (ne X Y)

	       This is the form used by MIPS16, for example.  */
	    x = SET_SRC (set);
	  else
	    break;
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  scalar_int_mode op0_mode;
  if (CONST_INT_P (op1)
      && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
      && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((const_val & max_val)
	      != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
	  break;

	default:
	  break;
	}
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  /* We promised to return a comparison.  */
  rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
  if (COMPARISON_P (ret))
    return ret;
  return 0;
}
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
	       int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  opt_scalar_int_mode in_mode_iter;
  scalar_int_mode mode;

  FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
    FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
      {
	scalar_int_mode in_mode = in_mode_iter.require ();
	scalar_int_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode).require () == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	FOR_EACH_MODE (i, mode, in_mode)
	  {
	    /* This must always exist (for the last iteration it will be
	       IN_MODE).  */
	    scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
	/* No subrtxes.  Leave start and count as 0.  */
	return true;
      if (format[i] == 'E' || format[i] == 'V')
	return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}
/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  unsigned int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */
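/* For example, in SImode a mask of 0xff yields 8 (an eight-bit field of
   low-order bits), while 0xff00 yields -1 because the field does not
   start at bit 0.  */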
int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (!HWI_COMPUTABLE_MODE_P (mode))
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
/* Return the mode of MEM's address.  */

scalar_int_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return as_a <scalar_int_mode> (mode);
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}
/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */
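/* For example, on a 32-bit-word little-endian target, splitting the
   DImode constant 0x100000002 gives *FIRST = (const_int 2) and
   *SECOND = (const_int 1); on a big-endian target the two words are
   swapped.  */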
void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
	 properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	}
      else
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      long l[2];

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((unsigned long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((unsigned long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
      poly_int64 remaining_bits = GET_MODE_PRECISION (mode) - len;

      return known_eq (pos, BITS_BIG_ENDIAN ? remaining_bits : 0);
    }
  return false;
}
/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */
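/* For example, given the address (and:SI (plus:SI (reg:SI 100)
   (reg:SI 101)) (const_int -4)), the AND is an alignment mutation and
   the returned pointer refers to the inner PLUS.  */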
rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
	   used to convert between pointer sizes.  */
	loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
	   acts as a combined truncation and extension.  */
	loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
	/* (and ... (const_int -X)) is used to align to X bytes.  */
	loc = &XEXP (*loc, 0);
      else if (code == SUBREG
	       && !OBJECT_P (SUBREG_REG (*loc))
	       && subreg_lowpart_p (*loc))
	/* (subreg (operator ...) ...) inside an AND is used for mode
	   conversion too.  */
	loc = &SUBREG_REG (*loc);
      else
	return loc;
      if (outer_code)
	*outer_code = code;
    }
}
/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
	  || code == ASHIFT
	  /* Needed by ARM targets.  */
	  || code == ASHIFTRT
	  || code == LSHIFTRT
	  || code == ROTATE
	  || code == ROTATERT);
}
/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}
/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}
/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}
/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}
/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}
/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */
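/* For example, (plus:SI (reg:SI 100) (const_int 8)) decomposes into
   base (reg:SI 100) and displacement 8, while
   (plus:SI (mult:SI (reg:SI 100) (const_int 4)) (reg:SI 101))
   has index (reg:SI 100) scaled by 4 and base (reg:SI 101).  */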
static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}
/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}
/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}
/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}
/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */
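/* For example, an index of (mult:SI (reg:SI 100) (const_int 4)) has
   scale 4, as does (ashift:SI (reg:SI 100) (const_int 2)); a bare
   register index has scale 1.  */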
HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
/* Return true if RTL X contains a SYMBOL_REF.  */

bool
contains_symbol_ref_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter))
      return true;

  return false;
}
/* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */

bool
contains_symbolic_reference_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
      return true;

  return false;
}
/* Return true if RTL X contains a constant pool address.  */

bool
contains_constant_pool_address_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) && CONSTANT_POOL_ADDRESS_P (*iter))
      return true;

  return false;
}
/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)