/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011, 2012 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
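/* Illustrative note (not in the original file): a value that fits in
   a single HOST_WIDE_INT is represented as the pair

     low  = (unsigned HOST_WIDE_INT) value;
     high = HWI_SIGN_EXTEND (low);

   so HIGH becomes all ones when LOW has its sign bit set and all
   zeros otherwise.  */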
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
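/* For example, negating the most negative SImode value 0x80000000
   yields 0x80000000 again; doing the negation on the unsigned type
   and letting gen_int_mode truncate back to MODE sidesteps signed
   overflow on the host.  */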
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* FIXME: We don't yet have a representation for wider modes.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
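/* E.g. for SImode this accepts (const_int -2147483648), which after
   masking with the mode mask is 0x80000000, the SImode sign bit.  */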
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
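/* The three val_signbit_* predicates differ only in the final test
   after masking: equality with exactly the sign bit, the sign bit
   being set, or the sign bit being clear.  All of them return false
   when MODE is wider than HOST_BITS_PER_WIDE_INT.  */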
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
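/* E.g. simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds to X
   outright, while an unfoldable (plus (const_int 4) (reg)) is merely
   reordered so the constant comes second before the PLUS is built.  */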
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
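/* E.g. a MEM that loads an SFmode constant from the constant pool is
   replaced here by the CONST_DOUBLE itself, letting later folding
   treat it as an ordinary constant operand.  */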
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !host_integerp (toffset, 0)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += TREE_INT_CST_LOW (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
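/* E.g. simplify_replace_rtx applied to (plus:SI (reg) (const_int 1))
   with OLD_RTX the register and NEW_RTX const1_rtx substitutes and
   folds the whole expression to (const_int 2).  */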
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
      break;
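      /* E.g. the De Morgan rewrite above turns (not (ior A B)) into
	 (and (not A) (not B)), which machines with an and-not
	 instruction can often match directly.  */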
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (mode, XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;
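      /* E.g. with STORE_FLAG_VALUE == 1 in SImode, (neg (lt x 0))
	 becomes (ashiftrt x 31): replicating the sign bit across the
	 whole word yields 0 for nonnegative X and -1 for negative X,
	 exactly the negated comparison result.  */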
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
	   || GET_CODE (op) == ZERO_EXTEND)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
	 (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
	 (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
	  && subreg_lowpart_p (op))
	return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
				   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  Note that this is also
	 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
	 modes we just have to apply a different definition for
	 truncation.  But don't do this for an (LSHIFTRT (MULT ...))
	 since this will cause problems with the umulXi3_highpart
	 patterns.  */
      if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	   ? (num_sign_bit_copies (op, GET_MODE (op))
	      > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
				- GET_MODE_PRECISION (mode)))
	   : truncated_to_mode (mode, op))
	  && ! (GET_CODE (op) == LSHIFTRT
		&& GET_CODE (XEXP (op, 0)) == MULT))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      break;
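      /* E.g. when X:SI is known to be already truncated, (truncate:QI
	 X) is rewritten as the lowpart subreg (subreg:QI X 0), with
	 the byte offset adjusted for big-endian targets by
	 gen_lowpart.  */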
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:SF foo:DF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), GET_MODE (XEXP (op, 0)));

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned)significand_size (GET_MODE (op))
		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /*  (float_extend (float_extend x)) is (float_extend x)

	  (float_extend (float x)) is (float x) assuming that double
	  rounding can't happen.
          */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
	return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;
    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}
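      /* E.g. (sign_extend:DI (mult:SI (sign_extend:SI x:HI)
	 (sign_extend:SI y:HI))) becomes a single 16x16->64 widening
	 multiply: the product needs at most 32 bits, so extending the
	 operands all the way to DImode is mathematically
	 equivalent.  */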
      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					 ? SIGN_EXTEND : ZERO_EXTEND,
					 mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
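/* Note that the SIGN_EXTEND and ZERO_EXTEND cases above are
   deliberately parallel: each folds nested extensions, widens
   widening multiplications, and, where the target permits, folds
   pointer-mode extensions via convert_memory_address.  */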
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  unsigned int op_width = GET_MODE_PRECISION (op_mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
	}
      if (CONST_INT_P (op) || CONST_DOUBLE_P (op)
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT
	   && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode
	  || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
	/* We should never get a negative number.  */
	gcc_assert (hv >= 0);
      else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  arg0 &= GET_MODE_MASK (mode);
	  val = ffs_hwi (arg0);
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
	  break;

	case CLRSB:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    val = GET_MODE_PRECISION (mode) - 1;
	  else if (arg0 >= 0)
	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
	  else
	    val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_PRECISION (mode);
	    }
	  else
	    val = ctz_hwi (arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    val = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;
		byte = (arg0 >> s) & 0xff;
		val |= byte << d;
	      }
	  }
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (op_width == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == op_width);
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & GET_MODE_MASK (op_mode);
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  op_width = GET_MODE_PRECISION (op_mode);
	  if (op_width == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == op_width);
	      val = arg0;
	    }
	  else if (op_width < HOST_BITS_PER_WIDE_INT)
	    {
	      val = arg0 & GET_MODE_MASK (op_mode);
	      if (val_signbit_known_set_p (op_mode, val))
		val |= ~GET_MODE_MASK (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	case SS_NEG:
	case US_NEG:
	case SS_ABS:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
    {
      double_int first, value;

      if (CONST_DOUBLE_AS_INT_P (op))
	first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
				       CONST_DOUBLE_LOW (op));
      else
	first = double_int::from_shwi (INTVAL (op));

      switch (code)
	{
	case NOT:
	  value = ~first;
	  break;

	case NEG:
	  value = -first;
	  break;

	case ABS:
	  if (first.is_negative ())
	    value = -first;
	  else
	    value = first;
	  break;

	case FFS:
	  value.high = 0;
	  if (first.low != 0)
	    value.low = ffs_hwi (first.low);
	  else if (first.high != 0)
	    value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
	  else
	    value.low = 0;
	  break;

	case CLZ:
	  value.high = 0;
	  if (first.high != 0)
	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
		      - HOST_BITS_PER_WIDE_INT;
	  else if (first.low != 0)
	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
	    value.low = GET_MODE_PRECISION (mode);
	  break;

	case CTZ:
	  value.high = 0;
	  if (first.low != 0)
	    value.low = ctz_hwi (first.low);
	  else if (first.high != 0)
	    value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
	    value.low = GET_MODE_PRECISION (mode);
	  break;

	case POPCOUNT:
	  value = double_int_zero;
	  while (first.low)
	    {
	      value.low++;
	      first.low &= first.low - 1;
	    }
	  while (first.high)
	    {
	      value.low++;
	      first.high &= first.high - 1;
	    }
	  break;

	case PARITY:
	  value = double_int_zero;
	  while (first.low)
	    {
	      value.low++;
	      first.low &= first.low - 1;
	    }
	  while (first.high)
	    {
	      value.low++;
	      first.high &= first.high - 1;
	    }
	  value.low &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    value = double_int_zero;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;

		if (s < HOST_BITS_PER_WIDE_INT)
		  byte = (first.low >> s) & 0xff;
		else
		  byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

		if (d < HOST_BITS_PER_WIDE_INT)
		  value.low |= byte << d;
		else
		  value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
	      }
	  }
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  value = first;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (op_width > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || op_width > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      value.low = first.low & GET_MODE_MASK (op_mode);
	      if (val_signbit_known_set_p (op_mode, value.low))
		value.low |= ~GET_MODE_MASK (op_mode);

	      value.high = HWI_SIGN_EXTEND (value.low);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_int_const (value, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (unsigned HOST_WIDE_INT) (-1)
		   << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == HOST_BITS_PER_DOUBLE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
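/* E.g. folding (fix:SI (const_double:DF 1e10)) here saturates to the
   largest SImode value 0x7fffffff, matching the middle-end's
   constant-folding semantics instead of leaving the overflow
   behavior unspecified.  */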
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
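/* E.g. for ((x + 1) + 2) the first attempt above folds the two
   constants: simplify_binary_operation (PLUS, mode, 1, 2) yields 3,
   so the whole expression reassociates to (x + 3).  */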
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
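/* Regarding the QImode example in the assertion comment: 128 does not
   fit in QImode and is canonically represented as (const_int -128),
   so a comparison helper that ignored the operand mode would wrongly
   treat 128 and -128 as equal.  */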
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (mode, op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  double_int coeff0, coeff1;
	  rtx lhs = op0, rhs = op1;

	  coeff0 = double_int_one;
	  coeff1 = double_int_one;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = double_int_minus_one;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = double_int_minus_one;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      double_int val;
	      bool speed = optimize_function_for_speed_p (cfun);

	      val = coeff0 + coeff1;
	      coeff = immed_double_int_const (val, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
		? tem : 0;
	    }
	}

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
	  && GET_CODE (op0) == XOR
	  && (CONST_INT_P (XEXP (op0, 1))
	      || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
2103 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2104 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2105 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2107 rtx xop00
= XEXP (op0
, 0);
2108 rtx xop10
= XEXP (op1
, 0);
2111 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2113 if (REG_P (xop00
) && REG_P (xop10
)
2114 && GET_MODE (xop00
) == GET_MODE (xop10
)
2115 && REGNO (xop00
) == REGNO (xop10
)
2116 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
2117 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  double_int coeff0, negcoeff1;
	  rtx lhs = op0, rhs = op1;

	  coeff0 = double_int_one;
	  negcoeff1 = double_int_minus_one;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = double_int_minus_one;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = double_int_one;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
	      negcoeff1 = -negcoeff1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      double_int val;
	      bool speed = optimize_function_for_speed_p (cfun);

	      val = coeff0 + negcoeff1;
	      coeff = immed_double_int_const (val, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
		? tem : 0;
	    }
	}
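      /* For example, (minus (mult x (const_int 6)) (ashift x (const_int 2)))
	 has coeff0 == 6 and negcoeff1 == -4, so it folds to
	 (mult x (const_int 2)), provided that is no more expensive than
	 the original expression.  */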
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_INT_P (op1) || CONST_DOUBLE_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}
      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift unless
	 we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) >= 0
	  /* If the mode is larger than the host word size, and the
	     uppermost bit is set, then this isn't a power of two due
	     to implicit sign extension.  */
	  && (width <= HOST_BITS_PER_WIDE_INT
	      || val != HOST_BITS_PER_WIDE_INT - 1))
	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (CONST_DOUBLE_AS_INT_P (trueop1)
	  && GET_MODE (op0) == mode
	  && CONST_DOUBLE_LOW (trueop1) == 0
	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
	  && (val < HOST_BITS_PER_DOUBLE_INT - 1
	      || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
	return simplify_gen_binary (ASHIFT, mode, op0,
				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode)
	  && trueop1 == CONSTM1_RTX (mode)
	  && !side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;

      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
	  && !side_effects_p (op0))
	return op1;
      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
	  if (((c1 & ~c2) & mask) != (c1 & mask))
	    {
	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (c1 & ~c2, mode));
	      return simplify_gen_binary (IOR, mode, tem, op1);
	    }
	}
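      /* E.g. with QImode masks C1 == 0x0f and C2 == 0x06, the last case
	 rewrites (ior (and x (const_int 15)) (const_int 6)) as
	 (ior (and x (const_int 9)) (const_int 6)): bits already set by
	 C2 need not be kept in C1.  */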
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
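      /* For instance, in SImode (ior (ashift x (const_int 24))
	 (lshiftrt x (const_int 8))) has CX + CY == 32 and becomes
	 (rotate x (const_int 24)).  */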
      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
	      + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (op1)
	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
	return simplify_gen_binary (IOR, mode,
				    simplify_gen_binary
					  (AND, mode, XEXP (op0, 0),
					   GEN_INT (UINTVAL (XEXP (op0, 1))
						    & ~UINTVAL (op1))),
				    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (mode, XEXP (op0, 0),
						       mask),
					XEXP (op0, 1));
	}

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
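      /* Flipping the sign bit is the same as adding it modulo the mode
	 size, so e.g. in SImode (xor x (const_int -2147483648)) is
	 rewritten as (plus x (const_int -2147483648)).  */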
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
	  && GET_CODE (op0) == PLUS
	  && (CONST_INT_P (XEXP (op0, 1))
	      || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1)))
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);
      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  rtx na_c
	    = simplify_binary_operation (AND, mode,
					 simplify_gen_unary (NOT, mode, a, mode),
					 c);
	  if ((~cval & bval) == 0)
	    {
	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    GEN_INT (~bval & cval));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (na_c == const0_rtx)
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    GEN_INT (~cval & bval));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      GEN_INT (~bval & cval));
		}
	    }
	}
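      /* As a concrete case of the derivation above, take B == 3 and
	 C == 7.  Then ~C&B == 0, so (xor (and x (const_int 3))
	 (const_int 7)) simplifies to (ior (and (not x) (const_int 7))
	 (const_int 4)), i.e. ~A&C | ~B&C.  */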
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  enum machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
		      == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}
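      /* For instance, with M == 0x0f only the low four bits survive the
	 outer AND, so (and (plus (and a (const_int 255)) b)
	 (const_int 15)) becomes (and (plus a b) (const_int 15)):
	 the inner mask 0xff cannot affect the retained bits.  */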
      /* (and X (ior (not X) Y)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && op0 == XEXP (XEXP (op1, 0), 0))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && op1 == XEXP (XEXP (op0, 0), 0))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;
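      /* E.g. (udiv x (const_int 8)) becomes
	 (lshiftrt x (const_int 3)).  */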
    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      /* x/-1.0 is -x.  */
	      if (REAL_VALUES_EQUAL (d, dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else if (SCALAR_INT_MODE_P (mode))
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !cfun->can_throw_non_call_exceptions)
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    return rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    GEN_INT (INTVAL (op1) - 1));
      break;
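      /* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)):
	 for a power of two the remainder is just the low bits.  */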
    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;
    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;
    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_PRECISION (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
      goto canonicalize_shift;
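      /* On a target where CLZ_DEFINED_VALUE_AT_ZERO yields the SImode
	 precision of 32, (lshiftrt (clz:SI x) (const_int 5)) becomes
	 (eq x (const_int 0)): 32 is the only clz result with bit 5 set,
	 and it occurs exactly when X is zero.  */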
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && mode_signbit_p (mode, trueop1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == 1);
	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
						      (trueop1, 0, 0)));

	  /* Extract a scalar element from a nested VEC_SELECT expression
	     (with optional nested VEC_CONCAT expression).  Some targets
	     (i386) extract a scalar element from a vector using a chain of
	     nested VEC_SELECT expressions.  When the input operand is a
	     memory operand, this operation can be simplified to a simple
	     scalar load from an offset memory address.  */
	  if (GET_CODE (trueop0) == VEC_SELECT)
	    {
	      rtx op0 = XEXP (trueop0, 0);
	      rtx op1 = XEXP (trueop0, 1);

	      enum machine_mode opmode = GET_MODE (op0);
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;

	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
	      int elem;

	      rtvec vec;
	      rtx tmp_op, tmp;

	      gcc_assert (GET_CODE (op1) == PARALLEL);
	      gcc_assert (i < n_elts);

	      /* Select the element pointed to by the nested selector.  */
	      elem = INTVAL (XVECEXP (op1, 0, i));

	      /* Handle the case when the nested VEC_SELECT wraps a
		 VEC_CONCAT.  */
	      if (GET_CODE (op0) == VEC_CONCAT)
		{
		  rtx op00 = XEXP (op0, 0);
		  rtx op01 = XEXP (op0, 1);

		  enum machine_mode mode00, mode01;
		  int n_elts00, n_elts01;

		  mode00 = GET_MODE (op00);
		  mode01 = GET_MODE (op01);

		  /* Find out the number of elements of each operand.  */
		  if (VECTOR_MODE_P (mode00))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
		    }
		  else
		    n_elts00 = 1;

		  if (VECTOR_MODE_P (mode01))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
		    }
		  else
		    n_elts01 = 1;

		  gcc_assert (n_elts == n_elts00 + n_elts01);

		  /* Select the correct operand of the VEC_CONCAT
		     and adjust the selector.  */
		  if (elem < n_elts01)
		    tmp_op = op00;
		  else
		    {
		      tmp_op = op01;
		      elem -= n_elts00;
		    }
		}
	      else
		tmp_op = op0;

	      vec = rtvec_alloc (1);
	      RTVEC_ELT (vec, 0) = GEN_INT (elem);

	      tmp = gen_rtx_fmt_ee (code, mode,
				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
	      return tmp;
	    }

	  if (GET_CODE (trueop0) == VEC_DUPLICATE
	      && GET_MODE (XEXP (trueop0, 0)) == mode)
	    return XEXP (trueop0, 0);
	}
      else
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    {
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
	      for (i = 0; i < n_elts; i++)
		{
		  rtx x = XVECEXP (trueop1, 0, i);

		  gcc_assert (CONST_INT_P (x));
		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
						       INTVAL (x));
		}

	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* If we build {a,b} then permute it, build the result directly.  */
	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 0)) == mode
	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 1)) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 4 && i1 < 4);
	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }

	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_MODE (trueop0) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 2 && i1 < 2);
	      subop0 = XEXP (trueop0, i0);
	      subop1 = XEXP (trueop0, i1);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }
	}
      if (XVECLEN (trueop1, 0) == 1
	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	  && GET_CODE (trueop0) == VEC_CONCAT)
	{
	  rtx vec = trueop0;
	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

	  /* Try to find the element in the VEC_CONCAT.  */
	  while (GET_MODE (vec) != mode
		 && GET_CODE (vec) == VEC_CONCAT)
	    {
	      HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
	      if (offset < vec_size)
		vec = XEXP (vec, 0);
	      else
		{
		  offset -= vec_size;
		  vec = XEXP (vec, 1);
		}
	      vec = avoid_constant_pool_reference (vec);
	    }

	  if (GET_MODE (vec) == mode)
	    return vec;
	}

      return 0;
    case VEC_CONCAT:
      {
	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				      ? GET_MODE (trueop0)
				      : GET_MODE_INNER (mode));
	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				      ? GET_MODE (trueop1)
				      : GET_MODE_INNER (mode));

	gcc_assert (VECTOR_MODE_P (mode));
	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    == GET_MODE_SIZE (mode));

	if (VECTOR_MODE_P (op0_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op0_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	if (VECTOR_MODE_P (op1_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op1_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || CONST_INT_P (trueop0) || CONST_DOUBLE_P (trueop0))
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| CONST_INT_P (trueop1) || CONST_DOUBLE_P (trueop1)))
	  {
	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned int i;
	    unsigned in_n_elts = 1;

	    if (VECTOR_MODE_P (op0_mode))
	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}
rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
				 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_INT_P (op0)
	  || GET_CODE (op0) == CONST_FIXED
	  || CONST_DOUBLE_P (op0))
      && (CONST_INT_P (op1)
	  || CONST_DOUBLE_P (op1)
	  || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return 0;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_DOUBLE_INT
      && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
      && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
    {
      double_int o0, o1, res, tmp;
      bool overflow;

      o0 = rtx_to_double_int (op0);
      o1 = rtx_to_double_int (op1);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  o1 = -o1;

	  /* Fall through....  */

	case PLUS:
	  res = o0 + o1;
	  break;

	case MULT:
	  res = o0 * o1;
	  break;

	case DIV:
	  res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
					 &tmp, &overflow);
	  if (overflow)
	    return 0;
	  break;

	case MOD:
	  tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
					 &res, &overflow);
	  if (overflow)
	    return 0;
	  break;

	case UDIV:
	  res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
					 &tmp, &overflow);
	  if (overflow)
	    return 0;
	  break;

	case UMOD:
	  tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
					 &res, &overflow);
	  if (overflow)
	    return 0;
	  break;

	case AND:
	  res = o0 & o1;
	  break;

	case IOR:
	  res = o0 | o1;
	  break;

	case XOR:
	  res = o0 ^ o1;
	  break;

	case SMIN:
	  res = o0.smin (o1);
	  break;

	case SMAX:
	  res = o0.smax (o1);
	  break;

	case UMIN:
	  res = o0.umin (o1);
	  break;

	case UMAX:
	  res = o0.umax (o1);
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
	  {
	    unsigned HOST_WIDE_INT cnt;

	    if (SHIFT_COUNT_TRUNCATED)
	      {
		o1.high = 0;
		o1.low &= GET_MODE_PRECISION (mode) - 1;
	      }

	    if (!o1.fits_uhwi ()
		|| o1.to_uhwi () >= GET_MODE_PRECISION (mode))
	      return 0;

	    cnt = o1.to_uhwi ();
	    unsigned short prec = GET_MODE_PRECISION (mode);

	    if (code == LSHIFTRT || code == ASHIFTRT)
	      res = o0.rshift (cnt, prec, code == ASHIFTRT);
	    else if (code == ASHIFT)
	      res = o0.alshift (cnt, prec);
	    else if (code == ROTATE)
	      res = o0.lrotate (cnt, prec);
	    else /* code == ROTATERT */
	      res = o0.rrotate (cnt, prec);

	    break;
	  }

	default:
	  return 0;
	}

      return immed_double_int_const (res, mode);
    }
  if (CONST_INT_P (op0) && CONST_INT_P (op1)
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
	 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
	{
	  arg0 &= GET_MODE_MASK (mode);
	  arg1 &= GET_MODE_MASK (mode);

	  arg0s = arg0;
	  if (val_signbit_known_set_p (mode, arg0s))
	    arg0s |= ~GET_MODE_MASK (mode);

	  arg1s = arg1;
	  if (val_signbit_known_set_p (mode, arg1s))
	    arg1s |= ~GET_MODE_MASK (mode);
	}
      else
	{
	  arg0s = arg0;
	  arg1s = arg1;
	}

      /* Compute the value of the arithmetic.  */

      switch (code)
	{
	case PLUS:
	  val = arg0s + arg1s;
	  break;

	case MINUS:
	  val = arg0s - arg1s;
	  break;

	case MULT:
	  val = arg0s * arg1s;
	  break;

	case DIV:
	  if (arg1s == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s / arg1s;
	  break;

	case MOD:
	  if (arg1s == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s % arg1s;
	  break;

	case UDIV:
	  if (arg1 == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
	  break;

	case UMOD:
	  if (arg1 == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
	  break;
	case AND:
	  val = arg0 & arg1;
	  break;

	case IOR:
	  val = arg0 | arg1;
	  break;

	case XOR:
	  val = arg0 ^ arg1;
	  break;

	case LSHIFTRT:
	case ASHIFT:
	case ASHIFTRT:
	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
	     the value is in range.  We can't return any old value for
	     out-of-range arguments because either the middle-end (via
	     shift_truncation_mask) or the back-end might be relying on
	     target-specific knowledge.  Nor can we rely on
	     shift_truncation_mask, since the shift might not be part of an
	     ashlM3, lshrM3 or ashrM3 instruction.  */
	  if (SHIFT_COUNT_TRUNCATED)
	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  val = (code == ASHIFT
		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

	  /* Sign-extend the result for arithmetic right shifts.  */
	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
	    val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
	  break;

	case ROTATERT:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
	  break;

	case ROTATE:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
	  break;

	case COMPARE:
	  /* Do nothing here.  */
	  return 0;
	case SMIN:
	  val = arg0s <= arg1s ? arg0s : arg1s;
	  break;

	case UMIN:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SMAX:
	  val = arg0s > arg1s ? arg0s : arg1s;
	  break;

	case UMAX:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	case SS_MULT:
	case US_MULT:
	case SS_DIV:
	case US_DIV:
	case SS_ASHIFT:
	case US_ASHIFT:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
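/* For example, (plus (minus a b) b) is flattened into the operand list
   {a, -b, b}; the b terms cancel pairwise in the combination loop and
   just a is rebuilt.  */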
struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}

static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);
  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      canonicalized |= this_neg;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);
  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  j = i - 1;
	  if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	    continue;

	  canonicalized = 1;
	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      /* If nothing changed, fail.  */
      if (!canonicalized)
	return NULL_RTX;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }
  while (changed);
  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					 INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = !ops[0].neg;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}

/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }
  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}

/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);
  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1))))
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }
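  /* This is the usual unsigned overflow check: e.g.
     (ltu (plus a (const_int 4)) (const_int 4)) becomes
     (geu a (const_int -4)), since a + 4 wraps below 4 exactly when
     a is at least the unsigned value of -4.  */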
  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);
  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && (CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1))
      && (CONST_INT_P (XEXP (op0, 1))
          || CONST_DOUBLE_AS_INT_P (XEXP (op0, 1))))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, cmp_mode,
                                                         XEXP (op0, 1), op1));
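  /* E.g. (eq (xor x (const_int 5)) (const_int 3)) becomes
     (eq x (const_int 6)), because XOR is its own inverse and
     5 ^ 3 == 6 (illustrative values).  */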
  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
        /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
        return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
        /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
        return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      default:
        break;
      }

  return NULL_RTX;
}

enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
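/* For instance, a caller that has proved its operands unequal, signed
   less-than and unsigned greater-than would pass CMP_LT | CMP_GTU;
   comparison_result (LEU, CMP_LT | CMP_GTU) then yields const0_rtx,
   while comparison_result (LT, CMP_LT | CMP_GTU) yields const_true_rtx.  */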
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If no simplification is possible,
   this function returns zero.  Otherwise, it returns either
   const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     enum machine_mode mode,
                                     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));
  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
        mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
        mode = GET_MODE (op1);
      else
        return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
            && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;
  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
           && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      return comparison_result (code,
                                (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
                                 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
      && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_PRECISION (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (CONST_DOUBLE_AS_INT_P (trueop0))
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (CONST_DOUBLE_AS_INT_P (trueop1))
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= GET_MODE_MASK (mode);
          l1u &= GET_MODE_MASK (mode);

          if (val_signbit_known_set_p (mode, l0s))
            l0s |= ~GET_MODE_MASK (mode);

          if (val_signbit_known_set_p (mode, l1s))
            l1s |= ~GET_MODE_MASK (mode);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
        return comparison_result (code, CMP_EQ);
      else
        {
          int cr;
          cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
          cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
          return comparison_result (code, cr);
        }
    }
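  /* Worked example (assuming a 64-bit HOST_WIDE_INT): comparing -1 and 1
     in DImode gives (l0u, h0u) = (~0, ~0) and (l1u, h1u) = (1, 0), so CR
     becomes CMP_LT | CMP_GTU: -1 is smaller as a signed value but larger
     as an unsigned value, and comparison_result picks the right bit.  */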
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
          || code == LEU
          || code == GTU
          || code == LTU)
        sign = 0;
      else
        sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
        {
          mmin = 0;
          mmax = nonzero;
        }
      else
        {
          rtx mmin_rtx, mmax_rtx;
          get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

          mmin = INTVAL (mmin_rtx);
          mmax = INTVAL (mmax_rtx);
          if (sign)
            {
              unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

              mmin >>= (sign_copies - 1);
              mmax >>= (sign_copies - 1);
            }
        }

      switch (code)
        {
        /* x >= y is always true for y <= mmin, always false for y > mmax.  */
        case GEU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          break;
        case GE:
          if (val <= mmin)
            return const_true_rtx;
          if (val > mmax)
            return const0_rtx;
          break;

        /* x <= y is always true for y >= mmax, always false for y < mmin.  */
        case LEU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          break;
        case LE:
          if (val >= mmax)
            return const_true_rtx;
          if (val < mmin)
            return const0_rtx;
          break;

        case EQ:
          /* x == y is always false for y out of range.  */
          if (val < mmin || val > mmax)
            return const0_rtx;
          break;

        /* x > y is always false for y >= mmax, always true for y < mmin.  */
        case GTU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          break;
        case GT:
          if (val >= mmax)
            return const0_rtx;
          if (val < mmin)
            return const_true_rtx;
          break;

        /* x < y is always false for y <= mmin, always true for y > mmax.  */
        case LTU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          break;
        case LT:
          if (val <= mmin)
            return const0_rtx;
          if (val > mmax)
            return const_true_rtx;
          break;

        case NE:
          /* x != y is always true for y out of range.  */
          if (val < mmin || val > mmax)
            return const_true_rtx;
          break;

        default:
          break;
        }
    }
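  /* E.g. if nonzero_bits shows that only the low four bits of TRUEOP0
     can be set (so 0 <= x <= 15, illustrative values), then
     (gtu x (const_int 15)) folds to const0_rtx and
     (leu x (const_int 15)) folds to const_true_rtx.  */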
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
         their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
        {
          if (code == EQ || code == LEU)
            return const0_rtx;
          if (code == NE || code == GTU)
            return const_true_rtx;
        }

      /* See if the first operand is an IOR with a constant.  If so, we
         may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
        {
          rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
          if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
            {
              int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
              int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
                              && (UINTVAL (inner_const)
                                  & ((unsigned HOST_WIDE_INT) 1
                                     << sign_bitnum)));

              switch (code)
                {
                case EQ:
                case LEU:
                  return const0_rtx;
                case NE:
                case GTU:
                  return const_true_rtx;
                case LT:
                case LE:
                  if (has_sign)
                    return const_true_rtx;
                  break;
                case GE:
                case GT:
                  if (has_sign)
                    return const0_rtx;
                  break;
                default:
                  break;
                }
            }
        }
    }
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
          || (GET_CODE (trueop0) == FLOAT_EXTEND
              && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
        {
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (!HONOR_SNANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) < 0 is false"));
              return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (!HONOR_NANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) >= 0 is true"));
              return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          return const_true_rtx;

        default:
          break;
        }
    }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            op1 = tem, op0 = XEXP (op0, 0), any_change = true;
        }
      else if (GET_CODE (op1) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op0, mode);
          if (tem)
            op0 = tem, op1 = XEXP (op1, 0), any_change = true;
        }

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
        tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
        return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
          && CONST_INT_P (op1)
          && CONST_INT_P (op2)
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
          unsigned HOST_WIDE_INT val = UINTVAL (op0);
          HOST_WIDE_INT op1val = INTVAL (op1);
          HOST_WIDE_INT op2val = INTVAL (op2);
          if (BITS_BIG_ENDIAN)
            val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
          else
            val >>= op2val;

          if (HOST_BITS_PER_WIDE_INT != op1val)
            {
              /* First zero-extend.  */
              val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
                     != 0)
                val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
            }

          return gen_int_mode (val, mode);
        }
      break;
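      /* Worked example (illustrative, !BITS_BIG_ENDIAN): for
         (zero_extract (const_int 0xab) (const_int 4) (const_int 4))
         VAL is shifted right by OP2VAL = 4, giving 0xa, and masked with
         (1 << 4) - 1, so the extraction folds to (const_int 10).  */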
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          /* Look for happy constants in op1 and op2.  */
          if (CONST_INT_P (op1) && CONST_INT_P (op2))
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));
            }

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),
                                                XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp)
            {
              if (CONST_INT_P (temp))
                return temp == const0_rtx ? op2 : op1;
              else
                return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
            }
        }
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          if (!(INTVAL (op2) & mask))
            return op1;
          if ((INTVAL (op2) & mask) == mask)
            return op0;
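          /* E.g. with V4SI operands and op2 == (const_int 5), i.e. bits 0
             and 2 set (illustrative values), elements 0 and 2 of the result
             are taken from OP0 and elements 1 and 3 from OP1, so two
             CONST_VECTOR operands fold to a single CONST_VECTOR below.  */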
          op0 = avoid_constant_pool_reference (op0);
          op1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (op0) == CONST_VECTOR
              && GET_CODE (op1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
                                    ? CONST_VECTOR_ELT (op0, i)
                                    : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;

        case CONST_DOUBLE:
          if (GET_MODE (el) == VOIDmode)
            {
              unsigned char extend = 0;
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }

              if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
                extend = -1;
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = extend;
            }
          else
            {
              long tmp[max_bitsize / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        case CONST_FIXED:
          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
            {
              for (i = 0; i < elem_bitsize; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
            }
          else
            {
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
                   i += value_bit)
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
                        >> (i - HOST_BITS_PER_WIDE_INT);
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                    << (i - HOST_BITS_PER_WIDE_INT);

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
              elems[elem] = immed_double_const (lo, hi, outer_submode);
            else
              return NULL_RTX;
          }
          break;

        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;

        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          {
            FIXED_VALUE_TYPE f;
            f.data.low = 0;
            f.data.high = 0;
            f.mode = outer_submode;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                              << (i - HOST_BITS_PER_WIDE_INT));

            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
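/* For illustration (assuming a little-endian target): taking the lowpart
   HImode subreg of the SImode constant 0x12345678 unpacks the value into
   the byte array 78 56 34 12, selects the two low-order bytes, and
   repacks them as (const_int 0x5678).  */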
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (CONST_INT_P (op)
      || CONST_DOUBLE_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  Irritating exception is paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In paradoxical subreg, see if we are still looking on lower part.
             If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_UNSIGNED_SET
                (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
            }
          return newx;
        }
      return NULL_RTX;
    }
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));
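  /* E.g. the lowpart (subreg:QI (truncate:HI (reg:SI x)) 0) folds to the
     single truncation (truncate:QI (reg:SI x)) (modes chosen for
     illustration).  */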
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis that cannot
             grok partial register anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
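  /* E.g. the lowpart (subreg:QI (zero_extend:SI (reg:QI x)) 0) folds back
     to (reg:QI x) itself, while (subreg:HI (zero_extend:SI (reg:QI x)) 0)
     becomes (zero_extend:HI (reg:QI x)) (modes chosen for illustration).  */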
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
      && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) >= 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
     and try replacing the SUBREG and shift with it.  Don't do this if
     the MEM has a mode-dependent address or if we would be widening it.  */

  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (innermode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && byte == subreg_lowpart_offset (outermode, innermode)
      && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), outermode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }