1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
30 #include "hard-reg-set.h"
33 #include "insn-config.h"
41 /* Simplification and canonicalization of RTL. */
43 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
44 virtual regs here because the simplify_*_operation routines are called
45 by integrate.c, which is called before virtual register instantiation.
47 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
48 a header file so that their definitions can be shared with the
49 simplification routines in simplify-rtx.c. Until then, do not
50 change these macros without also changing the copy in simplify-rtx.c. */
/* Note: inside the PLUS branch we must compare XEXP (X, 0), not X itself,
   against arg_pointer_rtx; otherwise (plus arg_pointer const_int) is never
   recognized as a fixed base.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
66 /* Similar, but also allows reference to the stack pointer.
68 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
69 arg_pointer_rtx by itself is nonzero, because on at least one machine,
70 the i960, the arg pointer is zero when it is unused. */
/* Note: inside the first PLUS branch we must compare XEXP (X, 0), not X
   itself, against arg_pointer_rtx (same fix as in FIXED_BASE_PLUS_P).  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
92 /* Much code operates on (low, high) pairs; the low value is an
93 unsigned wide int, the high value a signed wide int. We
occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
/* Sign-extend LOW into a high word: -1 if LOW is negative, 0 otherwise.
   The argument is parenthesized so expression arguments parse correctly.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
99 static rtx simplify_plus_minus
PARAMS ((enum rtx_code
,
100 enum machine_mode
, rtx
, rtx
));
101 static void check_fold_consts
PARAMS ((PTR
));
102 static rtx avoid_constant_pool_reference
PARAMS ((rtx
));
104 /* Make a binary operation by properly ordering the operands and
105 seeing if the expression folds. */
108 simplify_gen_binary (code
, mode
, op0
, op1
)
110 enum machine_mode mode
;
115 /* Put complex operands first and constants second if commutative. */
116 if (GET_RTX_CLASS (code
) == 'c'
117 && swap_commutative_operands_p (op0
, op1
))
118 tem
= op0
, op0
= op1
, op1
= tem
;
120 /* If this simplifies, do it. */
121 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
126 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
127 just form the operation. */
129 if (code
== PLUS
&& GET_CODE (op1
) == CONST_INT
130 && GET_MODE (op0
) != VOIDmode
)
131 return plus_constant (op0
, INTVAL (op1
));
132 else if (code
== MINUS
&& GET_CODE (op1
) == CONST_INT
133 && GET_MODE (op0
) != VOIDmode
)
134 return plus_constant (op0
, - INTVAL (op1
));
136 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
139 /* In case X is MEM referencing constant pool, return the real value.
140 Otherwise return X. */
142 avoid_constant_pool_reference (x
)
145 if (GET_CODE (x
) != MEM
)
147 if (GET_CODE (XEXP (x
, 0)) != SYMBOL_REF
148 || !CONSTANT_POOL_ADDRESS_P (XEXP (x
, 0)))
150 return get_pool_constant (XEXP (x
, 0));
153 /* Make a unary operation by first seeing if it folds and otherwise making
154 the specified operation. */
157 simplify_gen_unary (code
, mode
, op
, op_mode
)
159 enum machine_mode mode
;
161 enum machine_mode op_mode
;
165 /* If this simplifies, use it. */
166 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
169 return gen_rtx_fmt_e (code
, mode
, op
);
172 /* Likewise for ternary operations. */
175 simplify_gen_ternary (code
, mode
, op0_mode
, op0
, op1
, op2
)
177 enum machine_mode mode
, op0_mode
;
182 /* If this simplifies, use it. */
183 if (0 != (tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
187 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
190 /* Likewise, for relational operations.
191 CMP_MODE specifies mode comparison is done in.
195 simplify_gen_relational (code
, mode
, cmp_mode
, op0
, op1
)
197 enum machine_mode mode
;
198 enum machine_mode cmp_mode
;
203 if ((tem
= simplify_relational_operation (code
, cmp_mode
, op0
, op1
)) != 0)
206 /* Put complex operands first and constants second. */
207 if (swap_commutative_operands_p (op0
, op1
))
208 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
210 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
213 /* Replace all occurrences of OLD in X with NEW and try to simplify the
214 resulting RTX. Return a new RTX which is as simplified as possible. */
217 simplify_replace_rtx (x
, old
, new)
222 enum rtx_code code
= GET_CODE (x
);
223 enum machine_mode mode
= GET_MODE (x
);
225 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
226 to build a new expression substituting recursively. If we can't do
227 anything, return our input. */
232 switch (GET_RTX_CLASS (code
))
236 enum machine_mode op_mode
= GET_MODE (XEXP (x
, 0));
237 rtx op
= (XEXP (x
, 0) == old
238 ? new : simplify_replace_rtx (XEXP (x
, 0), old
, new));
240 return simplify_gen_unary (code
, mode
, op
, op_mode
);
246 simplify_gen_binary (code
, mode
,
247 simplify_replace_rtx (XEXP (x
, 0), old
, new),
248 simplify_replace_rtx (XEXP (x
, 1), old
, new));
251 enum machine_mode op_mode
= (GET_MODE (XEXP (x
, 0)) != VOIDmode
252 ? GET_MODE (XEXP (x
, 0))
253 : GET_MODE (XEXP (x
, 1)));
254 rtx op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
255 rtx op1
= simplify_replace_rtx (XEXP (x
, 1), old
, new);
258 simplify_gen_relational (code
, mode
,
261 : GET_MODE (op0
) != VOIDmode
270 enum machine_mode op_mode
= GET_MODE (XEXP (x
, 0));
271 rtx op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
274 simplify_gen_ternary (code
, mode
,
279 simplify_replace_rtx (XEXP (x
, 1), old
, new),
280 simplify_replace_rtx (XEXP (x
, 2), old
, new));
284 /* The only case we try to handle is a SUBREG. */
288 exp
= simplify_gen_subreg (GET_MODE (x
),
289 simplify_replace_rtx (SUBREG_REG (x
),
291 GET_MODE (SUBREG_REG (x
)),
299 if (GET_CODE (x
) == MEM
)
301 replace_equiv_address_nv (x
,
302 simplify_replace_rtx (XEXP (x
, 0),
310 /* Try to simplify a unary operation CODE whose output mode is to be
311 MODE with input operand OP whose mode was originally OP_MODE.
312 Return zero if no simplification can be made. */
315 simplify_unary_operation (code
, mode
, op
, op_mode
)
317 enum machine_mode mode
;
319 enum machine_mode op_mode
;
321 unsigned int width
= GET_MODE_BITSIZE (mode
);
322 rtx trueop
= avoid_constant_pool_reference (op
);
324 /* The order of these tests is critical so that, for example, we don't
325 check the wrong mode (input vs. output) for a conversion operation,
326 such as FIX. At some point, this should be simplified. */
328 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
330 if (code
== FLOAT
&& GET_MODE (trueop
) == VOIDmode
331 && (GET_CODE (trueop
) == CONST_DOUBLE
|| GET_CODE (trueop
) == CONST_INT
))
333 HOST_WIDE_INT hv
, lv
;
336 if (GET_CODE (trueop
) == CONST_INT
)
337 lv
= INTVAL (trueop
), hv
= HWI_SIGN_EXTEND (lv
);
339 lv
= CONST_DOUBLE_LOW (trueop
), hv
= CONST_DOUBLE_HIGH (trueop
);
341 #ifdef REAL_ARITHMETIC
342 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
347 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
348 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
349 d
+= (double) (unsigned HOST_WIDE_INT
) (~ lv
);
355 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
356 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
357 d
+= (double) (unsigned HOST_WIDE_INT
) lv
;
359 #endif /* REAL_ARITHMETIC */
360 d
= real_value_truncate (mode
, d
);
361 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
363 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (trueop
) == VOIDmode
364 && (GET_CODE (trueop
) == CONST_DOUBLE
365 || GET_CODE (trueop
) == CONST_INT
))
367 HOST_WIDE_INT hv
, lv
;
370 if (GET_CODE (trueop
) == CONST_INT
)
371 lv
= INTVAL (trueop
), hv
= HWI_SIGN_EXTEND (lv
);
373 lv
= CONST_DOUBLE_LOW (trueop
), hv
= CONST_DOUBLE_HIGH (trueop
);
375 if (op_mode
== VOIDmode
)
377 /* We don't know how to interpret negative-looking numbers in
378 this case, so don't try to fold those. */
382 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
385 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
387 #ifdef REAL_ARITHMETIC
388 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
391 d
= (double) (unsigned HOST_WIDE_INT
) hv
;
392 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
393 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
394 d
+= (double) (unsigned HOST_WIDE_INT
) lv
;
395 #endif /* REAL_ARITHMETIC */
396 d
= real_value_truncate (mode
, d
);
397 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
401 if (GET_CODE (trueop
) == CONST_INT
402 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
404 register HOST_WIDE_INT arg0
= INTVAL (trueop
);
405 register HOST_WIDE_INT val
;
418 val
= (arg0
>= 0 ? arg0
: - arg0
);
422 /* Don't use ffs here. Instead, get low order bit and then its
423 number. If arg0 is zero, this will return 0, as desired. */
424 arg0
&= GET_MODE_MASK (mode
);
425 val
= exact_log2 (arg0
& (- arg0
)) + 1;
433 if (op_mode
== VOIDmode
)
435 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
437 /* If we were really extending the mode,
438 we would have to distinguish between zero-extension
439 and sign-extension. */
440 if (width
!= GET_MODE_BITSIZE (op_mode
))
444 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
445 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
451 if (op_mode
== VOIDmode
)
453 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
455 /* If we were really extending the mode,
456 we would have to distinguish between zero-extension
457 and sign-extension. */
458 if (width
!= GET_MODE_BITSIZE (op_mode
))
462 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
465 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
467 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
468 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
483 val
= trunc_int_for_mode (val
, mode
);
485 return GEN_INT (val
);
488 /* We can do some operations on integer CONST_DOUBLEs. Also allow
489 for a DImode operation on a CONST_INT. */
490 else if (GET_MODE (trueop
) == VOIDmode
&& width
<= HOST_BITS_PER_INT
* 2
491 && (GET_CODE (trueop
) == CONST_DOUBLE
492 || GET_CODE (trueop
) == CONST_INT
))
494 unsigned HOST_WIDE_INT l1
, lv
;
495 HOST_WIDE_INT h1
, hv
;
497 if (GET_CODE (trueop
) == CONST_DOUBLE
)
498 l1
= CONST_DOUBLE_LOW (trueop
), h1
= CONST_DOUBLE_HIGH (trueop
);
500 l1
= INTVAL (trueop
), h1
= HWI_SIGN_EXTEND (l1
);
510 neg_double (l1
, h1
, &lv
, &hv
);
515 neg_double (l1
, h1
, &lv
, &hv
);
523 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& (-h1
)) + 1;
525 lv
= exact_log2 (l1
& (-l1
)) + 1;
529 /* This is just a change-of-mode, so do nothing. */
534 if (op_mode
== VOIDmode
535 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
539 lv
= l1
& GET_MODE_MASK (op_mode
);
543 if (op_mode
== VOIDmode
544 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
548 lv
= l1
& GET_MODE_MASK (op_mode
);
549 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
550 && (lv
& ((HOST_WIDE_INT
) 1
551 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
552 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
554 hv
= HWI_SIGN_EXTEND (lv
);
565 return immed_double_const (lv
, hv
, mode
);
568 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
569 else if (GET_CODE (trueop
) == CONST_DOUBLE
570 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
576 if (setjmp (handler
))
577 /* There used to be a warning here, but that is inadvisable.
578 People may want to cause traps, and the natural way
579 to do it should not get a warning. */
582 set_float_handler (handler
);
584 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop
);
589 d
= REAL_VALUE_NEGATE (d
);
593 if (REAL_VALUE_NEGATIVE (d
))
594 d
= REAL_VALUE_NEGATE (d
);
598 d
= real_value_truncate (mode
, d
);
602 /* All this does is change the mode. */
606 d
= REAL_VALUE_RNDZINT (d
);
610 d
= REAL_VALUE_UNSIGNED_RNDZINT (d
);
620 x
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
621 set_float_handler (NULL
);
625 else if (GET_CODE (trueop
) == CONST_DOUBLE
626 && GET_MODE_CLASS (GET_MODE (trueop
)) == MODE_FLOAT
627 && GET_MODE_CLASS (mode
) == MODE_INT
628 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
634 if (setjmp (handler
))
637 set_float_handler (handler
);
639 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop
);
644 val
= REAL_VALUE_FIX (d
);
648 val
= REAL_VALUE_UNSIGNED_FIX (d
);
655 set_float_handler (NULL
);
657 val
= trunc_int_for_mode (val
, mode
);
659 return GEN_INT (val
);
662 /* This was formerly used only for non-IEEE float.
663 eggert@twinsun.com says it is safe for IEEE also. */
666 enum rtx_code reversed
;
667 /* There are some simplifications we can do even if the operands
672 /* (not (not X)) == X. */
673 if (GET_CODE (op
) == NOT
)
676 /* (not (eq X Y)) == (ne X Y), etc. */
677 if (mode
== BImode
&& GET_RTX_CLASS (GET_CODE (op
)) == '<'
678 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
))
680 return gen_rtx_fmt_ee (reversed
,
681 op_mode
, XEXP (op
, 0), XEXP (op
, 1));
685 /* (neg (neg X)) == X. */
686 if (GET_CODE (op
) == NEG
)
691 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
692 becomes just the MINUS if its mode is MODE. This allows
693 folding switch statements on machines using casesi (such as
695 if (GET_CODE (op
) == TRUNCATE
696 && GET_MODE (XEXP (op
, 0)) == mode
697 && GET_CODE (XEXP (op
, 0)) == MINUS
698 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
699 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
702 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
703 if (! POINTERS_EXTEND_UNSIGNED
704 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
706 || (GET_CODE (op
) == SUBREG
707 && GET_CODE (SUBREG_REG (op
)) == REG
708 && REG_POINTER (SUBREG_REG (op
))
709 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
710 return convert_memory_address (Pmode
, op
);
714 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
716 if (POINTERS_EXTEND_UNSIGNED
> 0
717 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
719 || (GET_CODE (op
) == SUBREG
720 && GET_CODE (SUBREG_REG (op
)) == REG
721 && REG_POINTER (SUBREG_REG (op
))
722 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
723 return convert_memory_address (Pmode
, op
);
735 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
736 and OP1. Return 0 if no simplification is possible.
738 Don't use this for relational operations such as EQ or LT.
739 Use simplify_relational_operation instead. */
742 simplify_binary_operation (code
, mode
, op0
, op1
)
744 enum machine_mode mode
;
747 register HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
749 unsigned int width
= GET_MODE_BITSIZE (mode
);
751 rtx trueop0
= avoid_constant_pool_reference (op0
);
752 rtx trueop1
= avoid_constant_pool_reference (op1
);
754 /* Relational operations don't work here. We must know the mode
755 of the operands in order to do the comparison correctly.
756 Assuming a full word can give incorrect results.
757 Consider comparing 128 with -128 in QImode. */
759 if (GET_RTX_CLASS (code
) == '<')
762 /* Make sure the constant is second. */
763 if (GET_RTX_CLASS (code
) == 'c'
764 && swap_commutative_operands_p (trueop0
, trueop1
))
766 tem
= op0
, op0
= op1
, op1
= tem
;
767 tem
= trueop0
, trueop0
= trueop1
, trueop1
= tem
;
770 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
771 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
772 && GET_CODE (trueop0
) == CONST_DOUBLE
773 && GET_CODE (trueop1
) == CONST_DOUBLE
774 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
776 REAL_VALUE_TYPE f0
, f1
, value
;
779 if (setjmp (handler
))
782 set_float_handler (handler
);
784 REAL_VALUE_FROM_CONST_DOUBLE (f0
, trueop0
);
785 REAL_VALUE_FROM_CONST_DOUBLE (f1
, trueop1
);
786 f0
= real_value_truncate (mode
, f0
);
787 f1
= real_value_truncate (mode
, f1
);
789 #ifdef REAL_ARITHMETIC
790 #ifndef REAL_INFINITY
791 if (code
== DIV
&& REAL_VALUES_EQUAL (f1
, dconst0
))
794 REAL_ARITHMETIC (value
, rtx_to_tree_code (code
), f0
, f1
);
808 #ifndef REAL_INFINITY
815 value
= MIN (f0
, f1
);
818 value
= MAX (f0
, f1
);
825 value
= real_value_truncate (mode
, value
);
826 set_float_handler (NULL
);
827 return CONST_DOUBLE_FROM_REAL_VALUE (value
, mode
);
829 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
831 /* We can fold some multi-word operations. */
832 if (GET_MODE_CLASS (mode
) == MODE_INT
833 && width
== HOST_BITS_PER_WIDE_INT
* 2
834 && (GET_CODE (trueop0
) == CONST_DOUBLE
835 || GET_CODE (trueop0
) == CONST_INT
)
836 && (GET_CODE (trueop1
) == CONST_DOUBLE
837 || GET_CODE (trueop1
) == CONST_INT
))
839 unsigned HOST_WIDE_INT l1
, l2
, lv
;
840 HOST_WIDE_INT h1
, h2
, hv
;
842 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
843 l1
= CONST_DOUBLE_LOW (trueop0
), h1
= CONST_DOUBLE_HIGH (trueop0
);
845 l1
= INTVAL (trueop0
), h1
= HWI_SIGN_EXTEND (l1
);
847 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
848 l2
= CONST_DOUBLE_LOW (trueop1
), h2
= CONST_DOUBLE_HIGH (trueop1
);
850 l2
= INTVAL (trueop1
), h2
= HWI_SIGN_EXTEND (l2
);
855 /* A - B == A + (-B). */
856 neg_double (l2
, h2
, &lv
, &hv
);
859 /* .. fall through ... */
862 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
866 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
869 case DIV
: case MOD
: case UDIV
: case UMOD
:
870 /* We'd need to include tree.h to do this and it doesn't seem worth
875 lv
= l1
& l2
, hv
= h1
& h2
;
879 lv
= l1
| l2
, hv
= h1
| h2
;
883 lv
= l1
^ l2
, hv
= h1
^ h2
;
889 && ((unsigned HOST_WIDE_INT
) l1
890 < (unsigned HOST_WIDE_INT
) l2
)))
899 && ((unsigned HOST_WIDE_INT
) l1
900 > (unsigned HOST_WIDE_INT
) l2
)))
907 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
909 && ((unsigned HOST_WIDE_INT
) l1
910 < (unsigned HOST_WIDE_INT
) l2
)))
917 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
919 && ((unsigned HOST_WIDE_INT
) l1
920 > (unsigned HOST_WIDE_INT
) l2
)))
926 case LSHIFTRT
: case ASHIFTRT
:
928 case ROTATE
: case ROTATERT
:
929 #ifdef SHIFT_COUNT_TRUNCATED
930 if (SHIFT_COUNT_TRUNCATED
)
931 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
934 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
937 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
938 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
940 else if (code
== ASHIFT
)
941 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
942 else if (code
== ROTATE
)
943 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
944 else /* code == ROTATERT */
945 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
952 return immed_double_const (lv
, hv
, mode
);
955 if (GET_CODE (op0
) != CONST_INT
|| GET_CODE (op1
) != CONST_INT
956 || width
> HOST_BITS_PER_WIDE_INT
|| width
== 0)
958 /* Even if we can't compute a constant result,
959 there are some cases worth simplifying. */
964 /* In IEEE floating point, x+0 is not the same as x. Similarly
965 for the other optimizations below. */
966 if (TARGET_FLOAT_FORMAT
== IEEE_FLOAT_FORMAT
967 && FLOAT_MODE_P (mode
) && ! flag_unsafe_math_optimizations
)
970 if (trueop1
== CONST0_RTX (mode
))
973 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
974 if (GET_CODE (op0
) == NEG
)
975 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
976 else if (GET_CODE (op1
) == NEG
)
977 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
980 if (INTEGRAL_MODE_P (mode
)
981 && GET_CODE (op0
) == NOT
982 && trueop1
== const1_rtx
)
983 return gen_rtx_NEG (mode
, XEXP (op0
, 0));
985 /* Handle both-operands-constant cases. We can only add
986 CONST_INTs to constants since the sum of relocatable symbols
987 can't be handled by most assemblers. Don't add CONST_INT
988 to CONST_INT since overflow won't be computed properly if wider
989 than HOST_BITS_PER_WIDE_INT. */
991 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
992 && GET_CODE (op1
) == CONST_INT
)
993 return plus_constant (op0
, INTVAL (op1
));
994 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
995 && GET_CODE (op0
) == CONST_INT
)
996 return plus_constant (op1
, INTVAL (op0
));
998 /* See if this is something like X * C - X or vice versa or
999 if the multiplication is written as a shift. If so, we can
1000 distribute and make a new multiply, shift, or maybe just
1001 have X (if C is 2 in the example above). But don't make
1002 real multiply if we didn't have one before. */
1004 if (! FLOAT_MODE_P (mode
))
1006 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1007 rtx lhs
= op0
, rhs
= op1
;
1010 if (GET_CODE (lhs
) == NEG
)
1011 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1012 else if (GET_CODE (lhs
) == MULT
1013 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1015 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1018 else if (GET_CODE (lhs
) == ASHIFT
1019 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1020 && INTVAL (XEXP (lhs
, 1)) >= 0
1021 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1023 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1024 lhs
= XEXP (lhs
, 0);
1027 if (GET_CODE (rhs
) == NEG
)
1028 coeff1
= -1, rhs
= XEXP (rhs
, 0);
1029 else if (GET_CODE (rhs
) == MULT
1030 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1032 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1035 else if (GET_CODE (rhs
) == ASHIFT
1036 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1037 && INTVAL (XEXP (rhs
, 1)) >= 0
1038 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1040 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1041 rhs
= XEXP (rhs
, 0);
1044 if (rtx_equal_p (lhs
, rhs
))
1046 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1047 GEN_INT (coeff0
+ coeff1
));
1048 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1052 /* If one of the operands is a PLUS or a MINUS, see if we can
1053 simplify this by the associative law.
1054 Don't use the associative law for floating point.
1055 The inaccuracy makes it nonassociative,
1056 and subtle programs can break if operations are associated. */
1058 if (INTEGRAL_MODE_P (mode
)
1059 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1060 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
)
1061 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1067 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1068 using cc0, in which case we want to leave it as a COMPARE
1069 so we can distinguish it from a register-register-copy.
1071 In IEEE floating point, x-0 is not the same as x. */
1073 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1074 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1075 && trueop1
== CONST0_RTX (mode
))
1079 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1080 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1081 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1082 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1084 rtx xop00
= XEXP (op0
, 0);
1085 rtx xop10
= XEXP (op1
, 0);
1088 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1090 if (GET_CODE (xop00
) == REG
&& GET_CODE (xop10
) == REG
1091 && GET_MODE (xop00
) == GET_MODE (xop10
)
1092 && REGNO (xop00
) == REGNO (xop10
)
1093 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1094 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1101 /* None of these optimizations can be done for IEEE
1103 if (TARGET_FLOAT_FORMAT
== IEEE_FLOAT_FORMAT
1104 && FLOAT_MODE_P (mode
) && ! flag_unsafe_math_optimizations
)
1107 /* We can't assume x-x is 0 even with non-IEEE floating point,
1108 but since it is zero except in very strange circumstances, we
1109 will treat it as zero with -funsafe-math-optimizations. */
1110 if (rtx_equal_p (trueop0
, trueop1
)
1111 && ! side_effects_p (op0
)
1112 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1113 return CONST0_RTX (mode
);
1115 /* Change subtraction from zero into negation. */
1116 if (trueop0
== CONST0_RTX (mode
))
1117 return gen_rtx_NEG (mode
, op1
);
1119 /* (-1 - a) is ~a. */
1120 if (trueop0
== constm1_rtx
)
1121 return gen_rtx_NOT (mode
, op1
);
1123 /* Subtracting 0 has no effect. */
1124 if (trueop1
== CONST0_RTX (mode
))
1127 /* See if this is something like X * C - X or vice versa or
1128 if the multiplication is written as a shift. If so, we can
1129 distribute and make a new multiply, shift, or maybe just
1130 have X (if C is 2 in the example above). But don't make
1131 real multiply if we didn't have one before. */
1133 if (! FLOAT_MODE_P (mode
))
1135 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1136 rtx lhs
= op0
, rhs
= op1
;
1139 if (GET_CODE (lhs
) == NEG
)
1140 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1141 else if (GET_CODE (lhs
) == MULT
1142 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1144 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1147 else if (GET_CODE (lhs
) == ASHIFT
1148 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1149 && INTVAL (XEXP (lhs
, 1)) >= 0
1150 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1152 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1153 lhs
= XEXP (lhs
, 0);
1156 if (GET_CODE (rhs
) == NEG
)
1157 coeff1
= - 1, rhs
= XEXP (rhs
, 0);
1158 else if (GET_CODE (rhs
) == MULT
1159 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1161 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1164 else if (GET_CODE (rhs
) == ASHIFT
1165 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1166 && INTVAL (XEXP (rhs
, 1)) >= 0
1167 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1169 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1170 rhs
= XEXP (rhs
, 0);
1173 if (rtx_equal_p (lhs
, rhs
))
1175 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1176 GEN_INT (coeff0
- coeff1
));
1177 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1181 /* (a - (-b)) -> (a + b). */
1182 if (GET_CODE (op1
) == NEG
)
1183 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1185 /* If one of the operands is a PLUS or a MINUS, see if we can
1186 simplify this by the associative law.
1187 Don't use the associative law for floating point.
1188 The inaccuracy makes it nonassociative,
1189 and subtle programs can break if operations are associated. */
1191 if (INTEGRAL_MODE_P (mode
)
1192 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1193 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
)
1194 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1197 /* Don't let a relocatable value get a negative coeff. */
1198 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1199 return plus_constant (op0
, - INTVAL (op1
));
1201 /* (x - (x & y)) -> (x & ~y) */
1202 if (GET_CODE (op1
) == AND
)
1204 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1205 return simplify_gen_binary (AND
, mode
, op0
,
1206 gen_rtx_NOT (mode
, XEXP (op1
, 1)));
1207 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1208 return simplify_gen_binary (AND
, mode
, op0
,
1209 gen_rtx_NOT (mode
, XEXP (op1
, 0)));
1214 if (trueop1
== constm1_rtx
)
1216 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
1218 return tem
? tem
: gen_rtx_NEG (mode
, op0
);
1221 /* In IEEE floating point, x*0 is not always 0. */
1222 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1223 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1224 && trueop1
== CONST0_RTX (mode
)
1225 && ! side_effects_p (op0
))
1228 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1229 However, ANSI says we can drop signals,
1230 so we can do this anyway. */
1231 if (trueop1
== CONST1_RTX (mode
))
1234 /* Convert multiply by constant power of two into shift unless
1235 we are still generating RTL. This test is a kludge. */
1236 if (GET_CODE (trueop1
) == CONST_INT
1237 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1238 /* If the mode is larger than the host word size, and the
1239 uppermost bit is set, then this isn't a power of two due
1240 to implicit sign extension. */
1241 && (width
<= HOST_BITS_PER_WIDE_INT
1242 || val
!= HOST_BITS_PER_WIDE_INT
- 1)
1243 && ! rtx_equal_function_value_matters
)
1244 return gen_rtx_ASHIFT (mode
, op0
, GEN_INT (val
));
1246 if (GET_CODE (trueop1
) == CONST_DOUBLE
1247 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
)
1251 int op1is2
, op1ism1
;
1253 if (setjmp (handler
))
1256 set_float_handler (handler
);
1257 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1258 op1is2
= REAL_VALUES_EQUAL (d
, dconst2
);
1259 op1ism1
= REAL_VALUES_EQUAL (d
, dconstm1
);
1260 set_float_handler (NULL
);
1262 /* x*2 is x+x and x*(-1) is -x */
1263 if (op1is2
&& GET_MODE (op0
) == mode
)
1264 return gen_rtx_PLUS (mode
, op0
, copy_rtx (op0
));
1266 else if (op1ism1
&& GET_MODE (op0
) == mode
)
1267 return gen_rtx_NEG (mode
, op0
);
1272 if (trueop1
== const0_rtx
)
1274 if (GET_CODE (trueop1
) == CONST_INT
1275 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1276 == GET_MODE_MASK (mode
)))
1278 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1280 /* A | (~A) -> -1 */
1281 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1282 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1283 && ! side_effects_p (op0
)
1284 && GET_MODE_CLASS (mode
) != MODE_CC
)
1289 if (trueop1
== const0_rtx
)
1291 if (GET_CODE (trueop1
) == CONST_INT
1292 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1293 == GET_MODE_MASK (mode
)))
1294 return gen_rtx_NOT (mode
, op0
);
1295 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1296 && GET_MODE_CLASS (mode
) != MODE_CC
)
1301 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1303 if (GET_CODE (trueop1
) == CONST_INT
1304 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1305 == GET_MODE_MASK (mode
)))
1307 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1308 && GET_MODE_CLASS (mode
) != MODE_CC
)
1311 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1312 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1313 && ! side_effects_p (op0
)
1314 && GET_MODE_CLASS (mode
) != MODE_CC
)
1319 /* Convert divide by power of two into shift (divide by 1 handled
1321 if (GET_CODE (trueop1
) == CONST_INT
1322 && (arg1
= exact_log2 (INTVAL (trueop1
))) > 0)
1323 return gen_rtx_LSHIFTRT (mode
, op0
, GEN_INT (arg1
));
1325 /* ... fall through ... */
1328 if (trueop1
== CONST1_RTX (mode
))
1331 /* In IEEE floating point, 0/x is not always 0. */
1332 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1333 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1334 && trueop0
== CONST0_RTX (mode
)
1335 && ! side_effects_p (op1
))
1338 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1339 /* Change division by a constant into multiplication. Only do
1340 this with -funsafe-math-optimizations. */
1341 else if (GET_CODE (trueop1
) == CONST_DOUBLE
1342 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1343 && trueop1
!= CONST0_RTX (mode
)
1344 && flag_unsafe_math_optimizations
)
1347 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1349 if (! REAL_VALUES_EQUAL (d
, dconst0
))
1351 #if defined (REAL_ARITHMETIC)
1352 REAL_ARITHMETIC (d
, rtx_to_tree_code (DIV
), dconst1
, d
);
1353 return gen_rtx_MULT (mode
, op0
,
1354 CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
));
1357 gen_rtx_MULT (mode
, op0
,
1358 CONST_DOUBLE_FROM_REAL_VALUE (1./d
, mode
));
1366 /* Handle modulus by power of two (mod with 1 handled below). */
1367 if (GET_CODE (trueop1
) == CONST_INT
1368 && exact_log2 (INTVAL (trueop1
)) > 0)
1369 return gen_rtx_AND (mode
, op0
, GEN_INT (INTVAL (op1
) - 1));
1371 /* ... fall through ... */
1374 if ((trueop0
== const0_rtx
|| trueop1
== const1_rtx
)
1375 && ! side_effects_p (op0
) && ! side_effects_p (op1
))
1381 /* Rotating ~0 always results in ~0. */
1382 if (GET_CODE (trueop0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
1383 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
1384 && ! side_effects_p (op1
))
1387 /* ... fall through ... */
1392 if (trueop1
== const0_rtx
)
1394 if (trueop0
== const0_rtx
&& ! side_effects_p (op1
))
1399 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (trueop1
) == CONST_INT
1400 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
1401 && ! side_effects_p (op0
))
1403 else if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1408 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (trueop1
) == CONST_INT
1409 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
1410 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
1411 && ! side_effects_p (op0
))
1413 else if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1418 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1420 else if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1425 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
1427 else if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1438 /* Get the integer argument values in two forms:
1439 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1441 arg0
= INTVAL (trueop0
);
1442 arg1
= INTVAL (trueop1
);
1444 if (width
< HOST_BITS_PER_WIDE_INT
)
1446 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1447 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1450 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1451 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1454 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1455 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1463 /* Compute the value of the arithmetic. */
1468 val
= arg0s
+ arg1s
;
1472 val
= arg0s
- arg1s
;
1476 val
= arg0s
* arg1s
;
1481 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1484 val
= arg0s
/ arg1s
;
1489 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1492 val
= arg0s
% arg1s
;
1497 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1500 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
1505 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1508 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
1524 /* If shift count is undefined, don't fold it; let the machine do
1525 what it wants. But truncate it if the machine will do that. */
1529 #ifdef SHIFT_COUNT_TRUNCATED
1530 if (SHIFT_COUNT_TRUNCATED
)
1534 val
= ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
;
1541 #ifdef SHIFT_COUNT_TRUNCATED
1542 if (SHIFT_COUNT_TRUNCATED
)
1546 val
= ((unsigned HOST_WIDE_INT
) arg0
) << arg1
;
1553 #ifdef SHIFT_COUNT_TRUNCATED
1554 if (SHIFT_COUNT_TRUNCATED
)
1558 val
= arg0s
>> arg1
;
1560 /* Bootstrap compiler may not have sign extended the right shift.
1561 Manually extend the sign to insure bootstrap cc matches gcc. */
1562 if (arg0s
< 0 && arg1
> 0)
1563 val
|= ((HOST_WIDE_INT
) -1) << (HOST_BITS_PER_WIDE_INT
- arg1
);
1572 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
1573 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
1581 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
1582 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
1586 /* Do nothing here. */
1590 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
1594 val
= ((unsigned HOST_WIDE_INT
) arg0
1595 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1599 val
= arg0s
> arg1s
? arg0s
: arg1s
;
1603 val
= ((unsigned HOST_WIDE_INT
) arg0
1604 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1611 val
= trunc_int_for_mode (val
, mode
);
1613 return GEN_INT (val
);
1616 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1619 Rather than test for specific case, we do this by a brute-force method
1620 and do all possible simplifications until no more changes occur. Then
1621 we rebuild the operation. */
1624 simplify_plus_minus (code
, mode
, op0
, op1
)
1626 enum machine_mode mode
;
1632 int n_ops
= 2, input_ops
= 2, input_consts
= 0, n_consts
= 0;
1633 int first
= 1, negate
= 0, changed
;
1636 memset ((char *) ops
, 0, sizeof ops
);
1638 /* Set up the two operands and then expand them until nothing has been
1639 changed. If we run out of room in our array, give up; this should
1640 almost never happen. */
1642 ops
[0] = op0
, ops
[1] = op1
, negs
[0] = 0, negs
[1] = (code
== MINUS
);
1649 for (i
= 0; i
< n_ops
; i
++)
1650 switch (GET_CODE (ops
[i
]))
1657 ops
[n_ops
] = XEXP (ops
[i
], 1);
1658 negs
[n_ops
++] = GET_CODE (ops
[i
]) == MINUS
? !negs
[i
] : negs
[i
];
1659 ops
[i
] = XEXP (ops
[i
], 0);
1665 ops
[i
] = XEXP (ops
[i
], 0);
1666 negs
[i
] = ! negs
[i
];
1671 ops
[i
] = XEXP (ops
[i
], 0);
1677 /* ~a -> (-a - 1) */
1680 ops
[n_ops
] = constm1_rtx
;
1681 negs
[n_ops
++] = negs
[i
];
1682 ops
[i
] = XEXP (ops
[i
], 0);
1683 negs
[i
] = ! negs
[i
];
1690 ops
[i
] = GEN_INT (- INTVAL (ops
[i
])), negs
[i
] = 0, changed
= 1;
1698 /* If we only have two operands, we can't do anything. */
1702 /* Now simplify each pair of operands until nothing changes. The first
1703 time through just simplify constants against each other. */
1710 for (i
= 0; i
< n_ops
- 1; i
++)
1711 for (j
= i
+ 1; j
< n_ops
; j
++)
1712 if (ops
[i
] != 0 && ops
[j
] != 0
1713 && (! first
|| (CONSTANT_P (ops
[i
]) && CONSTANT_P (ops
[j
]))))
1715 rtx lhs
= ops
[i
], rhs
= ops
[j
];
1716 enum rtx_code ncode
= PLUS
;
1718 if (negs
[i
] && ! negs
[j
])
1719 lhs
= ops
[j
], rhs
= ops
[i
], ncode
= MINUS
;
1720 else if (! negs
[i
] && negs
[j
])
1723 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
1726 ops
[i
] = tem
, ops
[j
] = 0;
1727 negs
[i
] = negs
[i
] && negs
[j
];
1728 if (GET_CODE (tem
) == NEG
)
1729 ops
[i
] = XEXP (tem
, 0), negs
[i
] = ! negs
[i
];
1731 if (GET_CODE (ops
[i
]) == CONST_INT
&& negs
[i
])
1732 ops
[i
] = GEN_INT (- INTVAL (ops
[i
])), negs
[i
] = 0;
1740 /* Pack all the operands to the lower-numbered entries and give up if
1741 we didn't reduce the number of operands we had. Make sure we
1742 count a CONST as two operands. If we have the same number of
1743 operands, but have made more CONSTs than we had, this is also
1744 an improvement, so accept it. */
1746 for (i
= 0, j
= 0; j
< n_ops
; j
++)
1749 ops
[i
] = ops
[j
], negs
[i
++] = negs
[j
];
1750 if (GET_CODE (ops
[j
]) == CONST
)
1754 if (i
+ n_consts
> input_ops
1755 || (i
+ n_consts
== input_ops
&& n_consts
<= input_consts
))
1760 /* If we have a CONST_INT, put it last. */
1761 for (i
= 0; i
< n_ops
- 1; i
++)
1762 if (GET_CODE (ops
[i
]) == CONST_INT
)
1764 tem
= ops
[n_ops
- 1], ops
[n_ops
- 1] = ops
[i
] , ops
[i
] = tem
;
1765 j
= negs
[n_ops
- 1], negs
[n_ops
- 1] = negs
[i
], negs
[i
] = j
;
1768 /* Put a non-negated operand first. If there aren't any, make all
1769 operands positive and negate the whole thing later. */
1770 for (i
= 0; i
< n_ops
&& negs
[i
]; i
++)
1775 for (i
= 0; i
< n_ops
; i
++)
1781 tem
= ops
[0], ops
[0] = ops
[i
], ops
[i
] = tem
;
1782 j
= negs
[0], negs
[0] = negs
[i
], negs
[i
] = j
;
1785 /* Now make the result by performing the requested operations. */
1787 for (i
= 1; i
< n_ops
; i
++)
1788 result
= simplify_gen_binary (negs
[i
] ? MINUS
: PLUS
, mode
, result
, ops
[i
]);
1790 return negate
? gen_rtx_NEG (mode
, result
) : result
;
1795 rtx op0
, op1
; /* Input */
1796 int equal
, op0lt
, op1lt
; /* Output */
1801 check_fold_consts (data
)
1804 struct cfc_args
*args
= (struct cfc_args
*) data
;
1805 REAL_VALUE_TYPE d0
, d1
;
1807 /* We may possibly raise an exception while reading the value. */
1808 args
->unordered
= 1;
1809 REAL_VALUE_FROM_CONST_DOUBLE (d0
, args
->op0
);
1810 REAL_VALUE_FROM_CONST_DOUBLE (d1
, args
->op1
);
1812 /* Comparisons of Inf versus Inf are ordered. */
1813 if (REAL_VALUE_ISNAN (d0
)
1814 || REAL_VALUE_ISNAN (d1
))
1816 args
->equal
= REAL_VALUES_EQUAL (d0
, d1
);
1817 args
->op0lt
= REAL_VALUES_LESS (d0
, d1
);
1818 args
->op1lt
= REAL_VALUES_LESS (d1
, d0
);
1819 args
->unordered
= 0;
1822 /* Like simplify_binary_operation except used for relational operators.
1823 MODE is the mode of the operands, not that of the result. If MODE
1824 is VOIDmode, both operands must also be VOIDmode and we compare the
1825 operands in "infinite precision".
1827 If no simplification is possible, this function returns zero. Otherwise,
1828 it returns either const_true_rtx or const0_rtx. */
1831 simplify_relational_operation (code
, mode
, op0
, op1
)
1833 enum machine_mode mode
;
1836 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
1841 if (mode
== VOIDmode
1842 && (GET_MODE (op0
) != VOIDmode
1843 || GET_MODE (op1
) != VOIDmode
))
1846 /* If op0 is a compare, extract the comparison arguments from it. */
1847 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
1848 op1
= XEXP (op0
, 1), op0
= XEXP (op0
, 0);
1850 trueop0
= avoid_constant_pool_reference (op0
);
1851 trueop1
= avoid_constant_pool_reference (op1
);
1853 /* We can't simplify MODE_CC values since we don't know what the
1854 actual comparison is. */
1855 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
1862 /* Make sure the constant is second. */
1863 if (swap_commutative_operands_p (trueop0
, trueop1
))
1865 tem
= op0
, op0
= op1
, op1
= tem
;
1866 tem
= trueop0
, trueop0
= trueop1
, trueop1
= tem
;
1867 code
= swap_condition (code
);
1870 /* For integer comparisons of A and B maybe we can simplify A - B and can
1871 then simplify a comparison of that with zero. If A and B are both either
1872 a register or a CONST_INT, this can't help; testing for these cases will
1873 prevent infinite recursion here and speed things up.
1875 If CODE is an unsigned comparison, then we can never do this optimization,
1876 because it gives an incorrect result if the subtraction wraps around zero.
1877 ANSI C defines unsigned operations such that they never overflow, and
1878 thus such cases can not be ignored. */
1880 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
1881 && ! ((GET_CODE (op0
) == REG
|| GET_CODE (trueop0
) == CONST_INT
)
1882 && (GET_CODE (op1
) == REG
|| GET_CODE (trueop1
) == CONST_INT
))
1883 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
1884 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
1885 return simplify_relational_operation (signed_condition (code
),
1886 mode
, tem
, const0_rtx
);
1888 if (flag_unsafe_math_optimizations
&& code
== ORDERED
)
1889 return const_true_rtx
;
1891 if (flag_unsafe_math_optimizations
&& code
== UNORDERED
)
1894 /* For non-IEEE floating-point, if the two operands are equal, we know the
1896 if (rtx_equal_p (trueop0
, trueop1
)
1897 && (TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1898 || ! FLOAT_MODE_P (GET_MODE (trueop0
))
1899 || flag_unsafe_math_optimizations
))
1900 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
1902 /* If the operands are floating-point constants, see if we can fold
1904 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1905 else if (GET_CODE (trueop0
) == CONST_DOUBLE
1906 && GET_CODE (trueop1
) == CONST_DOUBLE
1907 && GET_MODE_CLASS (GET_MODE (trueop0
)) == MODE_FLOAT
)
1909 struct cfc_args args
;
1911 /* Setup input for check_fold_consts() */
1916 if (!do_float_handler (check_fold_consts
, (PTR
) &args
))
1929 return const_true_rtx
;
1942 /* Receive output from check_fold_consts() */
1944 op0lt
= op0ltu
= args
.op0lt
;
1945 op1lt
= op1ltu
= args
.op1lt
;
1947 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1949 /* Otherwise, see if the operands are both integers. */
1950 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
1951 && (GET_CODE (trueop0
) == CONST_DOUBLE
1952 || GET_CODE (trueop0
) == CONST_INT
)
1953 && (GET_CODE (trueop1
) == CONST_DOUBLE
1954 || GET_CODE (trueop1
) == CONST_INT
))
1956 int width
= GET_MODE_BITSIZE (mode
);
1957 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
1958 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
1960 /* Get the two words comprising each integer constant. */
1961 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
1963 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
1964 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
1968 l0u
= l0s
= INTVAL (trueop0
);
1969 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
1972 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
1974 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
1975 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
1979 l1u
= l1s
= INTVAL (trueop1
);
1980 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
1983 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1984 we have to sign or zero-extend the values. */
1985 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
1987 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1988 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1990 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1991 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1993 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1994 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1996 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
1997 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
1999 equal
= (h0u
== h1u
&& l0u
== l1u
);
2000 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
2001 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
2002 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
2003 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
2006 /* Otherwise, there are some code-specific tests we can make. */
2012 /* References to the frame plus a constant or labels cannot
2013 be zero, but a SYMBOL_REF can due to #pragma weak. */
2014 if (((NONZERO_BASE_PLUS_P (op0
) && trueop1
== const0_rtx
)
2015 || GET_CODE (trueop0
) == LABEL_REF
)
2016 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2017 /* On some machines, the ap reg can be 0 sometimes. */
2018 && op0
!= arg_pointer_rtx
2025 if (((NONZERO_BASE_PLUS_P (op0
) && trueop1
== const0_rtx
)
2026 || GET_CODE (trueop0
) == LABEL_REF
)
2027 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2028 && op0
!= arg_pointer_rtx
2031 return const_true_rtx
;
2035 /* Unsigned values are never negative. */
2036 if (trueop1
== const0_rtx
)
2037 return const_true_rtx
;
2041 if (trueop1
== const0_rtx
)
2046 /* Unsigned values are never greater than the largest
2048 if (GET_CODE (trueop1
) == CONST_INT
2049 && (unsigned HOST_WIDE_INT
) INTVAL (trueop1
) == GET_MODE_MASK (mode
)
2050 && INTEGRAL_MODE_P (mode
))
2051 return const_true_rtx
;
2055 if (GET_CODE (trueop1
) == CONST_INT
2056 && (unsigned HOST_WIDE_INT
) INTVAL (trueop1
) == GET_MODE_MASK (mode
)
2057 && INTEGRAL_MODE_P (mode
))
2068 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2074 return equal
? const_true_rtx
: const0_rtx
;
2077 return ! equal
? const_true_rtx
: const0_rtx
;
2080 return op0lt
? const_true_rtx
: const0_rtx
;
2083 return op1lt
? const_true_rtx
: const0_rtx
;
2085 return op0ltu
? const_true_rtx
: const0_rtx
;
2087 return op1ltu
? const_true_rtx
: const0_rtx
;
2090 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
2093 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
2095 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
2097 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
2099 return const_true_rtx
;
2107 /* Simplify CODE, an operation with result mode MODE and three operands,
2108 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2109 a constant. Return 0 if no simplifications is possible. */
2112 simplify_ternary_operation (code
, mode
, op0_mode
, op0
, op1
, op2
)
2114 enum machine_mode mode
, op0_mode
;
2117 unsigned int width
= GET_MODE_BITSIZE (mode
);
2119 /* VOIDmode means "infinite" precision. */
2121 width
= HOST_BITS_PER_WIDE_INT
;
2127 if (GET_CODE (op0
) == CONST_INT
2128 && GET_CODE (op1
) == CONST_INT
2129 && GET_CODE (op2
) == CONST_INT
2130 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
2131 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
2133 /* Extracting a bit-field from a constant */
2134 HOST_WIDE_INT val
= INTVAL (op0
);
2136 if (BITS_BIG_ENDIAN
)
2137 val
>>= (GET_MODE_BITSIZE (op0_mode
)
2138 - INTVAL (op2
) - INTVAL (op1
));
2140 val
>>= INTVAL (op2
);
2142 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
2144 /* First zero-extend. */
2145 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
2146 /* If desired, propagate sign bit. */
2147 if (code
== SIGN_EXTRACT
2148 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
2149 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
2152 /* Clear the bits that don't belong in our mode,
2153 unless they and our sign bit are all one.
2154 So we get either a reasonable negative value or a reasonable
2155 unsigned value for this mode. */
2156 if (width
< HOST_BITS_PER_WIDE_INT
2157 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
2158 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
2159 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2161 return GEN_INT (val
);
2166 if (GET_CODE (op0
) == CONST_INT
)
2167 return op0
!= const0_rtx
? op1
: op2
;
2169 /* Convert a == b ? b : a to "a". */
2170 if (GET_CODE (op0
) == NE
&& ! side_effects_p (op0
)
2171 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
2172 && rtx_equal_p (XEXP (op0
, 0), op1
)
2173 && rtx_equal_p (XEXP (op0
, 1), op2
))
2175 else if (GET_CODE (op0
) == EQ
&& ! side_effects_p (op0
)
2176 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
2177 && rtx_equal_p (XEXP (op0
, 1), op1
)
2178 && rtx_equal_p (XEXP (op0
, 0), op2
))
2180 else if (GET_RTX_CLASS (GET_CODE (op0
)) == '<' && ! side_effects_p (op0
))
2182 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
2183 ? GET_MODE (XEXP (op0
, 1))
2184 : GET_MODE (XEXP (op0
, 0)));
2186 if (cmp_mode
== VOIDmode
)
2187 cmp_mode
= op0_mode
;
2188 temp
= simplify_relational_operation (GET_CODE (op0
), cmp_mode
,
2189 XEXP (op0
, 0), XEXP (op0
, 1));
2191 /* See if any simplifications were possible. */
2192 if (temp
== const0_rtx
)
2194 else if (temp
== const1_rtx
)
2199 /* Look for happy constants in op1 and op2. */
2200 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
2202 HOST_WIDE_INT t
= INTVAL (op1
);
2203 HOST_WIDE_INT f
= INTVAL (op2
);
2205 if (t
== STORE_FLAG_VALUE
&& f
== 0)
2206 code
= GET_CODE (op0
);
2207 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
2210 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
2218 return gen_rtx_fmt_ee (code
, mode
, XEXP (op0
, 0), XEXP (op0
, 1));
2230 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2231 Return 0 if no simplifications is possible. */
2233 simplify_subreg (outermode
, op
, innermode
, byte
)
2236 enum machine_mode outermode
, innermode
;
2238 /* Little bit of sanity checking. */
2239 if (innermode
== VOIDmode
|| outermode
== VOIDmode
2240 || innermode
== BLKmode
|| outermode
== BLKmode
)
2243 if (GET_MODE (op
) != innermode
2244 && GET_MODE (op
) != VOIDmode
)
2247 if (byte
% GET_MODE_SIZE (outermode
)
2248 || byte
>= GET_MODE_SIZE (innermode
))
2251 if (outermode
== innermode
&& !byte
)
2254 /* Attempt to simplify constant to non-SUBREG expression. */
2255 if (CONSTANT_P (op
))
2258 unsigned HOST_WIDE_INT val
= 0;
2260 /* ??? This code is partly redundant with code below, but can handle
2261 the subregs of floats and similar corner cases.
2262 Later it we should move all simplification code here and rewrite
2263 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2264 using SIMPLIFY_SUBREG. */
2265 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
2267 rtx
new = gen_lowpart_if_possible (outermode
, op
);
2272 /* Similar comment as above apply here. */
2273 if (GET_MODE_SIZE (outermode
) == UNITS_PER_WORD
2274 && GET_MODE_SIZE (innermode
) > UNITS_PER_WORD
2275 && GET_MODE_CLASS (outermode
) == MODE_INT
)
2277 rtx
new = constant_subword (op
,
2278 (byte
/ UNITS_PER_WORD
),
2284 offset
= byte
* BITS_PER_UNIT
;
2285 switch (GET_CODE (op
))
2288 if (GET_MODE (op
) != VOIDmode
)
2291 /* We can't handle this case yet. */
2292 if (GET_MODE_BITSIZE (outermode
) >= HOST_BITS_PER_WIDE_INT
)
2295 part
= offset
>= HOST_BITS_PER_WIDE_INT
;
2296 if ((BITS_PER_WORD
> HOST_BITS_PER_WIDE_INT
2297 && BYTES_BIG_ENDIAN
)
2298 || (BITS_PER_WORD
<= HOST_BITS_PER_WIDE_INT
2299 && WORDS_BIG_ENDIAN
))
2301 val
= part
? CONST_DOUBLE_HIGH (op
) : CONST_DOUBLE_LOW (op
);
2302 offset
%= HOST_BITS_PER_WIDE_INT
;
2304 /* We've already picked the word we want from a double, so
2305 pretend this is actually an integer. */
2306 innermode
= mode_for_size (HOST_BITS_PER_WIDE_INT
, MODE_INT
, 0);
2310 if (GET_CODE (op
) == CONST_INT
)
2313 /* We don't handle synthetizing of non-integral constants yet. */
2314 if (GET_MODE_CLASS (outermode
) != MODE_INT
)
2317 if (BYTES_BIG_ENDIAN
|| WORDS_BIG_ENDIAN
)
2319 if (WORDS_BIG_ENDIAN
)
2320 offset
= (GET_MODE_BITSIZE (innermode
)
2321 - GET_MODE_BITSIZE (outermode
) - offset
);
2322 if (BYTES_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
2323 && GET_MODE_SIZE (outermode
) < UNITS_PER_WORD
)
2324 offset
= (offset
+ BITS_PER_WORD
- GET_MODE_BITSIZE (outermode
)
2325 - 2 * (offset
% BITS_PER_WORD
));
2328 if (offset
>= HOST_BITS_PER_WIDE_INT
)
2329 return ((HOST_WIDE_INT
) val
< 0) ? constm1_rtx
: const0_rtx
;
2333 if (GET_MODE_BITSIZE (outermode
) < HOST_BITS_PER_WIDE_INT
)
2334 val
= trunc_int_for_mode (val
, outermode
);
2335 return GEN_INT (val
);
2342 /* Changing mode twice with SUBREG => just change it once,
2343 or not at all if changing back op starting mode. */
2344 if (GET_CODE (op
) == SUBREG
)
2346 enum machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
2347 int final_offset
= byte
+ SUBREG_BYTE (op
);
2350 if (outermode
== innermostmode
2351 && byte
== 0 && SUBREG_BYTE (op
) == 0)
2352 return SUBREG_REG (op
);
2354 /* The SUBREG_BYTE represents offset, as if the value were stored
2355 in memory. Irritating exception is paradoxical subreg, where
2356 we define SUBREG_BYTE to be 0. On big endian machines, this
2357 value should be negative. For a moment, undo this exception. */
2358 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
2360 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
2361 if (WORDS_BIG_ENDIAN
)
2362 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
2363 if (BYTES_BIG_ENDIAN
)
2364 final_offset
+= difference
% UNITS_PER_WORD
;
2366 if (SUBREG_BYTE (op
) == 0
2367 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
2369 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
2370 if (WORDS_BIG_ENDIAN
)
2371 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
2372 if (BYTES_BIG_ENDIAN
)
2373 final_offset
+= difference
% UNITS_PER_WORD
;
2376 /* See whether resulting subreg will be paradoxical. */
2377 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
2379 /* In nonparadoxical subregs we can't handle negative offsets. */
2380 if (final_offset
< 0)
2382 /* Bail out in case resulting subreg would be incorrect. */
2383 if (final_offset
% GET_MODE_SIZE (outermode
)
2384 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
2390 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
2392 /* In paradoxical subreg, see if we are still looking on lower part.
2393 If so, our SUBREG_BYTE will be 0. */
2394 if (WORDS_BIG_ENDIAN
)
2395 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
2396 if (BYTES_BIG_ENDIAN
)
2397 offset
+= difference
% UNITS_PER_WORD
;
2398 if (offset
== final_offset
)
2404 /* Recurse for futher possible simplifications. */
2405 new = simplify_subreg (outermode
, SUBREG_REG (op
),
2406 GET_MODE (SUBREG_REG (op
)),
2410 return gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
2413 /* SUBREG of a hard register => just change the register number
2414 and/or mode. If the hard register is not valid in that mode,
2415 suppress this simplification. If the hard register is the stack,
2416 frame, or argument pointer, leave this as a SUBREG. */
2419 && (! REG_FUNCTION_VALUE_P (op
)
2420 || ! rtx_equal_function_value_matters
)
2421 #ifdef CLASS_CANNOT_CHANGE_MODE
2422 && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode
, innermode
)
2423 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_INT
2424 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_FLOAT
2425 && (TEST_HARD_REG_BIT
2426 (reg_class_contents
[(int) CLASS_CANNOT_CHANGE_MODE
],
2429 && REGNO (op
) < FIRST_PSEUDO_REGISTER
2430 && ((reload_completed
&& !frame_pointer_needed
)
2431 || (REGNO (op
) != FRAME_POINTER_REGNUM
2432 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2433 && REGNO (op
) != HARD_FRAME_POINTER_REGNUM
2436 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2437 && REGNO (op
) != ARG_POINTER_REGNUM
2439 && REGNO (op
) != STACK_POINTER_REGNUM
)
2441 int final_regno
= subreg_hard_regno (gen_rtx_SUBREG (outermode
, op
, byte
),
2444 /* ??? We do allow it if the current REG is not valid for
2445 its mode. This is a kludge to work around how float/complex
2446 arguments are passed on 32-bit Sparc and should be fixed. */
2447 if (HARD_REGNO_MODE_OK (final_regno
, outermode
)
2448 || ! HARD_REGNO_MODE_OK (REGNO (op
), innermode
))
2449 return gen_rtx_REG (outermode
, final_regno
);
2452 /* If we have a SUBREG of a register that we are replacing and we are
2453 replacing it with a MEM, make a new MEM and try replacing the
2454 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2455 or if we would be widening it. */
2457 if (GET_CODE (op
) == MEM
2458 && ! mode_dependent_address_p (XEXP (op
, 0))
2459 /* Allow splitting of volatile memory references in case we don't
2460 have instruction to move the whole thing. */
2461 && (! MEM_VOLATILE_P (op
)
2462 || (mov_optab
->handlers
[(int) innermode
].insn_code
2463 == CODE_FOR_nothing
))
2464 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
2465 return adjust_address_nv (op
, outermode
, byte
);
2467 /* Handle complex values represented as CONCAT
2468 of real and imaginary part. */
2469 if (GET_CODE (op
) == CONCAT
)
2471 int is_realpart
= byte
< GET_MODE_UNIT_SIZE (innermode
);
2472 rtx part
= is_realpart
? XEXP (op
, 0) : XEXP (op
, 1);
2473 unsigned int final_offset
;
2476 final_offset
= byte
% (GET_MODE_UNIT_SIZE (innermode
));
2477 res
= simplify_subreg (outermode
, part
, GET_MODE (part
), final_offset
);
2480 /* We can at least simplify it by referring directly to the relevent part. */
2481 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
2486 /* Make a SUBREG operation or equivalent if it folds. */
2489 simplify_gen_subreg (outermode
, op
, innermode
, byte
)
2492 enum machine_mode outermode
, innermode
;
2495 /* Little bit of sanity checking. */
2496 if (innermode
== VOIDmode
|| outermode
== VOIDmode
2497 || innermode
== BLKmode
|| outermode
== BLKmode
)
2500 if (GET_MODE (op
) != innermode
2501 && GET_MODE (op
) != VOIDmode
)
2504 if (byte
% GET_MODE_SIZE (outermode
)
2505 || byte
>= GET_MODE_SIZE (innermode
))
2508 if (GET_CODE (op
) == QUEUED
)
2511 new = simplify_subreg (outermode
, op
, innermode
, byte
);
2515 if (GET_CODE (op
) == SUBREG
|| GET_MODE (op
) == VOIDmode
)
2518 return gen_rtx_SUBREG (outermode
, op
, byte
);
2520 /* Simplify X, an rtx expression.
2522 Return the simplified expression or NULL if no simplifications
2525 This is the preferred entry point into the simplification routines;
2526 however, we still allow passes to call the more specific routines.
2528 Right now GCC has three (yes, three) major bodies of RTL simplficiation
2529 code that need to be unified.
2531 1. fold_rtx in cse.c. This code uses various CSE specific
2532 information to aid in RTL simplification.
2534 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2535 it uses combine specific information to aid in RTL
2538 3. The routines in this file.
2541 Long term we want to only have one body of simplification code; to
2542 get to that state I recommend the following steps:
2544 1. Pour over fold_rtx & simplify_rtx and move any simplifications
2545 which are not pass dependent state into these routines.
2547 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2548 use this routine whenever possible.
2550 3. Allow for pass dependent state to be provided to these
2551 routines and add simplifications based on the pass dependent
2552 state. Remove code from cse.c & combine.c that becomes
2555 It will take time, but ultimately the compiler will be easier to
2556 maintain and improve. It's totally silly that when we add a
2557 simplification that it needs to be added to 4 places (3 for RTL
2558 simplification and 1 for tree simplification. */
2564 enum rtx_code code
= GET_CODE (x
);
2565 enum machine_mode mode
= GET_MODE (x
);
2567 switch (GET_RTX_CLASS (code
))
2570 return simplify_unary_operation (code
, mode
,
2571 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
2573 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
2578 XEXP (x
, 0) = XEXP (x
, 1);
2580 return simplify_binary_operation (code
, mode
,
2581 XEXP (x
, 0), XEXP (x
, 1));
2585 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
2589 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
2590 XEXP (x
, 0), XEXP (x
, 1),
2594 return simplify_relational_operation (code
,
2595 ((GET_MODE (XEXP (x
, 0))
2597 ? GET_MODE (XEXP (x
, 0))
2598 : GET_MODE (XEXP (x
, 1))),
2599 XEXP (x
, 0), XEXP (x
, 1));
2601 /* The only case we try to handle is a SUBREG. */
2603 return simplify_gen_subreg (mode
, SUBREG_REG (x
),
2604 GET_MODE (SUBREG_REG (x
)),